refactor, bypass dns filtering using google dns, yts host resolution using qwant
This commit is contained in:
parent
6099b72570
commit
1fd24d0536
1
.gitignore
vendored
1
.gitignore
vendored
@ -3,3 +3,4 @@ flixtorrentz-api.iml
|
|||||||
build
|
build
|
||||||
.DS_Store
|
.DS_Store
|
||||||
.torrent.bolt.db
|
.torrent.bolt.db
|
||||||
|
ytsflix.iml
|
||||||
|
15
Dockerfile
15
Dockerfile
@ -2,15 +2,20 @@
|
|||||||
FROM golang:alpine as builder
|
FROM golang:alpine as builder
|
||||||
|
|
||||||
RUN apk add --no-cache git ca-certificates && \
|
RUN apk add --no-cache git ca-certificates && \
|
||||||
mkdir -p $GOPATH/src/git.adphi.net/Adphi/ytsflix && \
|
|
||||||
mkdir /build
|
mkdir /build
|
||||||
|
|
||||||
WORKDIR $GOPATH/src/git.adphi.net/Adphi/ytsflix
|
WORKDIR /ytsflix
|
||||||
ADD . .
|
|
||||||
|
COPY go.mod .
|
||||||
|
|
||||||
RUN go get -u github.com/gobuffalo/packr/... && \
|
RUN go get -u github.com/gobuffalo/packr/... && \
|
||||||
packr && \
|
packr
|
||||||
CGO_ENABLED=0 go build -v -o /build/ytsflix cmd/main.go
|
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
ADD . .
|
||||||
|
|
||||||
|
RUN CGO_ENABLED=0 packr build -v -o /build/ytsflix cmd/ytsflix/main.go
|
||||||
|
|
||||||
# Create Container's Image
|
# Create Container's Image
|
||||||
FROM scratch
|
FROM scratch
|
||||||
|
11
Makefile
11
Makefile
@ -1,11 +1,11 @@
|
|||||||
PROJECT_NAME=ytsflix
|
PROJECT_NAME=ytsflix
|
||||||
PKG := git.adphi.net/Adphi/$(PROJECT_NAME)
|
PKG := git.adphi.net/adphi/$(PROJECT_NAME)
|
||||||
VERSION=0.0.2
|
VERSION=0.0.2
|
||||||
GOPATH:=$(shell go env GOPATH)
|
GOPATH:=$(shell go env GOPATH)
|
||||||
|
|
||||||
.PHONY: tests deps build docker push coverage
|
.PHONY: tests deps build docker push coverage
|
||||||
|
|
||||||
all: deps tests coverage build docker push## Run all commands except docker
|
all: tests coverage build docker push## Run all commands except docker
|
||||||
all-docker: docker push ## Run all docker commands
|
all-docker: docker push ## Run all docker commands
|
||||||
|
|
||||||
|
|
||||||
@ -18,18 +18,13 @@ coverage: ## Generate global code coverage report
|
|||||||
go tool cover -html=cover/${PROJECT_NAME}cov -o coverage.html
|
go tool cover -html=cover/${PROJECT_NAME}cov -o coverage.html
|
||||||
|
|
||||||
|
|
||||||
docker: deps ## Build Docker images
|
docker: ## Build Docker images
|
||||||
@docker build -f Dockerfile -t adphi/${PROJECT_NAME}:latest -t adphi/${PROJECT_NAME}:${VERSION} .
|
@docker build -f Dockerfile -t adphi/${PROJECT_NAME}:latest -t adphi/${PROJECT_NAME}:${VERSION} .
|
||||||
|
|
||||||
push : ## Push Docker images
|
push : ## Push Docker images
|
||||||
@docker push adphi/${PROJECT_NAME}:latest
|
@docker push adphi/${PROJECT_NAME}:latest
|
||||||
@docker push adphi/${PROJECT_NAME}:${VERSION}
|
@docker push adphi/${PROJECT_NAME}:${VERSION}
|
||||||
|
|
||||||
deps: ## Get dependencies
|
|
||||||
@rm -rf vendor
|
|
||||||
@govendor init
|
|
||||||
@govendor add +external
|
|
||||||
|
|
||||||
build: ## Build service
|
build: ## Build service
|
||||||
@go build -v -o build/${PROJECT_NAME} cmd/main.go
|
@go build -v -o build/${PROJECT_NAME} cmd/main.go
|
||||||
|
|
||||||
|
@ -2,12 +2,14 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.adphi.net/Adphi/ytsflix/handler"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"git.adphi.net/adphi/ytsflix/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
@ -1,16 +1,17 @@
|
|||||||
package handler
|
package engine
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
"github.com/anacrolix/torrent"
|
"github.com/anacrolix/torrent"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"os"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Engine struct {
|
type Engine struct {
|
||||||
client *torrent.Client
|
client *torrent.Client
|
||||||
files map[string]*MovieTorrent
|
files map[string]*MovieTorrent
|
||||||
storagePath string
|
StoragePath string
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEngine(storagePath string) (*Engine, error) {
|
func NewEngine(storagePath string) (*Engine, error) {
|
||||||
@ -59,7 +60,7 @@ func (e *Engine) Download(magnet string) (*MovieTorrent, error) {
|
|||||||
<-t.GotInfo()
|
<-t.GotInfo()
|
||||||
file := getLargestFile(t)
|
file := getLargestFile(t)
|
||||||
file.Download()
|
file.Download()
|
||||||
mt := newMovieTorrent(file, t)
|
mt := NewMovieTorrent(file, t)
|
||||||
mt.engine = e
|
mt.engine = e
|
||||||
e.files[mt.FileName] = mt
|
e.files[mt.FileName] = mt
|
||||||
logrus.Infof("Downloading: %s", mt.FileName)
|
logrus.Infof("Downloading: %s", mt.FileName)
|
@ -1,18 +1,19 @@
|
|||||||
package handler
|
package engine
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/anacrolix/torrent"
|
|
||||||
"github.com/asticode/go-astisub"
|
|
||||||
"github.com/odwrtw/yifysubs"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"gopkg.in/h2non/filetype.v1"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/anacrolix/torrent"
|
||||||
|
"github.com/asticode/go-astisub"
|
||||||
|
"github.com/odwrtw/yifysubs"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"gopkg.in/h2non/filetype.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MovieTorrent struct {
|
type MovieTorrent struct {
|
||||||
@ -24,7 +25,7 @@ type MovieTorrent struct {
|
|||||||
engine *Engine
|
engine *Engine
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMovieTorrent(file *torrent.File, t *torrent.Torrent) *MovieTorrent {
|
func NewMovieTorrent(file *torrent.File, t *torrent.Torrent) *MovieTorrent {
|
||||||
fileName := filepath.Base(file.Path())
|
fileName := filepath.Base(file.Path())
|
||||||
ext := strings.SplitAfter(path.Ext(fileName), ".")[1]
|
ext := strings.SplitAfter(path.Ext(fileName), ".")[1]
|
||||||
MIME := filetype.GetType(ext).MIME.Value
|
MIME := filetype.GetType(ext).MIME.Value
|
||||||
@ -100,7 +101,7 @@ func (m *MovieTorrent) DownloadSubtitles(imdbID string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
m.Subtitles = map[string]string{}
|
m.Subtitles = map[string]string{}
|
||||||
basePath := filepath.Join(m.engine.storagePath, filepath.Dir(m.file.Path()))
|
basePath := filepath.Join(m.engine.StoragePath, filepath.Dir(m.file.Path()))
|
||||||
logrus.Debugf("Subtitles base path: %s", basePath)
|
logrus.Debugf("Subtitles base path: %s", basePath)
|
||||||
if _, err := os.Stat(basePath); os.IsNotExist(err) {
|
if _, err := os.Stat(basePath); os.IsNotExist(err) {
|
||||||
os.Mkdir(basePath, os.ModePerm)
|
os.Mkdir(basePath, os.ModePerm)
|
46
go.mod
Normal file
46
go.mod
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
module git.adphi.net/adphi/ytsflix
|
||||||
|
|
||||||
|
go 1.14
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/PuerkitoBio/goquery v1.4.1 // indirect
|
||||||
|
github.com/RoaringBitmap/roaring v0.4.17-0.20180820165455-627b662e6d11 // indirect
|
||||||
|
github.com/anacrolix/dht v0.0.0-20180808005204-cae37fd18420 // indirect
|
||||||
|
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05 // indirect
|
||||||
|
github.com/anacrolix/log v0.0.0-20180808012509-286fcf906b48 // indirect
|
||||||
|
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0 // indirect
|
||||||
|
github.com/anacrolix/sync v0.1.0 // indirect
|
||||||
|
github.com/anacrolix/torrent v0.0.0-20180903014831-3ed340c0e005
|
||||||
|
github.com/andybalholm/cascadia v1.0.0 // indirect
|
||||||
|
github.com/asticode/go-astilog v0.0.0-20181027121212-2f5c6b0d3c74 // indirect
|
||||||
|
github.com/asticode/go-astisub v0.0.0-20180728145013-146a999907c1
|
||||||
|
github.com/asticode/go-astitools v0.0.0-20181027135708-933b5f686c38 // indirect
|
||||||
|
github.com/asticode/go-astits v0.0.0-20180225134227-3bec246caf60 // indirect
|
||||||
|
github.com/boltdb/bolt v1.3.2-0.20180302180052-fd01fc79c553 // indirect
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
|
github.com/dustin/go-humanize v1.0.0
|
||||||
|
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect
|
||||||
|
github.com/gobuffalo/packr v1.13.7
|
||||||
|
github.com/google/btree v1.0.0 // indirect
|
||||||
|
github.com/gorilla/context v1.1.1 // indirect
|
||||||
|
github.com/gorilla/mux v1.6.3-0.20180807075256-e48e440e4c92
|
||||||
|
github.com/huandu/xstrings v1.2.1-0.20180906151751-8bbcf2f9ccb5 // indirect
|
||||||
|
github.com/jpillora/scraper v0.0.0-20171020112404-0b5e5ce320ff // indirect
|
||||||
|
github.com/levigross/grequests v0.0.0-20190908174114-253788527a1a
|
||||||
|
github.com/mattn/go-colorable v0.1.0 // indirect
|
||||||
|
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 // indirect
|
||||||
|
github.com/mattn/go-sqlite3 v1.9.1-0.20180926090220-0a88db3545c4 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.0.1-0.20180928013848-ce2ff0c13ce5 // indirect
|
||||||
|
github.com/odwrtw/yifysubs v0.0.0-20180215170549-9ca769fb2f90
|
||||||
|
github.com/pkg/errors v0.8.1-0.20181023235946-059132a15dd0
|
||||||
|
github.com/rylio/ytdl v0.5.2-0.20180818171727-06f651094627
|
||||||
|
github.com/sirupsen/logrus v1.1.2-0.20181101075517-7eeb7b7cbdeb
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||||
|
github.com/stretchr/testify v1.2.2
|
||||||
|
github.com/tinylib/msgp v1.0.3-0.20180912221530-f65876d3ea05 // indirect
|
||||||
|
github.com/willf/bitset v1.1.9 // indirect
|
||||||
|
github.com/willf/bloom v2.0.3+incompatible // indirect
|
||||||
|
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b // indirect
|
||||||
|
golang.org/x/text v0.3.1-0.20181030141323-6f44c5a2ea40 // indirect
|
||||||
|
gopkg.in/h2non/filetype.v1 v1.0.5
|
||||||
|
)
|
178
go.sum
Normal file
178
go.sum
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||||
|
github.com/PuerkitoBio/goquery v1.4.1 h1:smcIRGdYm/w7JSbcdeLHEMzxmsBQvl8lhf0dSw2nzMI=
|
||||||
|
github.com/PuerkitoBio/goquery v1.4.1/go.mod h1:T9ezsOHcCrDCgA8aF1Cqr3sSYbO/xgdy8/R/XiIMAhA=
|
||||||
|
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||||
|
github.com/RoaringBitmap/roaring v0.4.17-0.20180820165455-627b662e6d11 h1:SpkWyOjzWt6lfzO/jXsGHqxCfVwZ4VUQTvmqz5H3MAw=
|
||||||
|
github.com/RoaringBitmap/roaring v0.4.17-0.20180820165455-627b662e6d11/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||||
|
github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI=
|
||||||
|
github.com/anacrolix/dht v0.0.0-20180808005204-cae37fd18420 h1:ithDtO3iPr41WPcUYtwzLvwQ+aleuPuHYSmhf6Rda+o=
|
||||||
|
github.com/anacrolix/dht v0.0.0-20180808005204-cae37fd18420/go.mod h1:Z/mP+H70xwnUv957UXVkTrzYRhKs0mGjS0lyqS331g4=
|
||||||
|
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa h1:xCaATLKmn39QqLs3tUZYr6eKvezJV+FYvVOLTklxK6U=
|
||||||
|
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||||
|
github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E=
|
||||||
|
github.com/anacrolix/go-libutp v0.0.0-20180725071407-34b43d880940/go.mod h1:szRMvnqPPC0VKbIRVoImO5gjJj8OZCYdixSrDEo1TzM=
|
||||||
|
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05 h1:Zoniih3jyqtr3I0xFoMvw1USWpg+CbI/zOrcLudr0lc=
|
||||||
|
github.com/anacrolix/go-libutp v0.0.0-20180808010927-aebbeb60ea05/go.mod h1:POY/GPlrFKRxnOKH1sGAB+NBWMoP+sI+hHJxgcgWbWw=
|
||||||
|
github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
|
||||||
|
github.com/anacrolix/log v0.0.0-20180808012509-286fcf906b48 h1:aHwCj9Q6sjQOq5sC8Bzylt7GCAvt+D5MPEpkD6TeXxU=
|
||||||
|
github.com/anacrolix/log v0.0.0-20180808012509-286fcf906b48/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
|
||||||
|
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk=
|
||||||
|
github.com/anacrolix/missinggo v0.0.0-20180621131740-7fc7cfea16ea/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
|
||||||
|
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df h1:+se8qhX5ivmSCkP+gZXyFx2ETjk1pmnrYJ0Iyc+hZKY=
|
||||||
|
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
|
||||||
|
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
|
||||||
|
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0 h1:Fa1XqqLW62lQzEDlNA+QcdJbkfJcxQN0YC8983kj5tU=
|
||||||
|
github.com/anacrolix/mmsg v0.0.0-20180808012353-5adb2c1127c0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc=
|
||||||
|
github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
||||||
|
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
||||||
|
github.com/anacrolix/sync v0.0.0-20180725074606-fda11526ff08/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
||||||
|
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
|
||||||
|
github.com/anacrolix/sync v0.1.0 h1:JTD3L2crIEA4v6U/ZA1xQ8Yl3IqD3FJJ//LSo/uat2M=
|
||||||
|
github.com/anacrolix/sync v0.1.0/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
|
||||||
|
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||||
|
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||||
|
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs=
|
||||||
|
github.com/anacrolix/torrent v0.0.0-20180903014831-3ed340c0e005 h1:G4LOcnw9vNmXZmDVofOJ3RAcsIkV6mrZdWKmzyk37uw=
|
||||||
|
github.com/anacrolix/torrent v0.0.0-20180903014831-3ed340c0e005/go.mod h1:F1piecUEeH/yZKCYsflSTxW5uiAacywaltgmO1atY6w=
|
||||||
|
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572 h1:kpt6TQTVi6gognY+svubHfxxpq0DLU9AfTQyZVc3UOc=
|
||||||
|
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
|
||||||
|
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
|
||||||
|
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
|
||||||
|
github.com/asticode/go-astilog v0.0.0-20181027121212-2f5c6b0d3c74 h1:ndtrdy7sydb/NgV711BztFsqv78+D3YOJJG2IJfU+6A=
|
||||||
|
github.com/asticode/go-astilog v0.0.0-20181027121212-2f5c6b0d3c74/go.mod h1:Ps37X/dJWJEfYRhZW+oSPV9Q2bJsZKeUbcELRGHHcTw=
|
||||||
|
github.com/asticode/go-astisub v0.0.0-20180728145013-146a999907c1 h1:rkGngkYuInfUN9lXuMFDIcQzXWsnx4aG9W9OXHuBFa4=
|
||||||
|
github.com/asticode/go-astisub v0.0.0-20180728145013-146a999907c1/go.mod h1:xeIVqWzWf4FmtftqxYgZccqq/ihzQifwXzNU8Wp0PUI=
|
||||||
|
github.com/asticode/go-astitools v0.0.0-20181027135708-933b5f686c38 h1:Wa5e1o9OkgwBGJpxCaTlemBXmMvsTHfRLBSKP1kGyAw=
|
||||||
|
github.com/asticode/go-astitools v0.0.0-20181027135708-933b5f686c38/go.mod h1:vPD76Qs1Diw7YLv8/lwOtAC84r0b3K5GU6tAxyrZuE0=
|
||||||
|
github.com/asticode/go-astits v0.0.0-20180225134227-3bec246caf60 h1:xpET8cFBb1vEC141W9w4wONf1t6wZ3jxdwxZe+ejCgE=
|
||||||
|
github.com/asticode/go-astits v0.0.0-20180225134227-3bec246caf60/go.mod h1:IO8rus+3gLhbOSb9nI40h7UIG5vh7T1LsmvyBvXZT20=
|
||||||
|
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||||
|
github.com/boltdb/bolt v1.3.2-0.20180302180052-fd01fc79c553 h1:JsFGvzmvh7HGD2Q56FkCtowlJyTJcesskDcqWMG0Zho=
|
||||||
|
github.com/boltdb/bolt v1.3.2-0.20180302180052-fd01fc79c553/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||||
|
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2 h1:1B/+1BcRhOMG1KH/YhNIU8OppSWk5d/NGyfRla88CuY=
|
||||||
|
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||||
|
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||||
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
|
||||||
|
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||||
|
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a h1:2Zw3pxDRTs4nX1WCLAEm27UN0hvjZSge7EaUUQexRZw=
|
||||||
|
github.com/elgatito/upnp v0.0.0-20180711183757-2f244d205f9a/go.mod h1:afkYpY8JAIL4341N7Zj9xJ5yTovsg6BkWfBFlCzIoF4=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
|
||||||
|
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||||
|
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
|
||||||
|
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
|
github.com/gobuffalo/packr v1.13.7 h1:2uZgLd6b/W4yRBZV/ScaORxZLNGMHO0VCvqQNkKukNA=
|
||||||
|
github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24=
|
||||||
|
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
|
||||||
|
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||||
|
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f h1:FDM3EtwZLyhW48YRiyqjivNlNZjAObv4xt4NnJaU+NQ=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
|
||||||
|
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||||
|
github.com/gorilla/mux v1.6.3-0.20180807075256-e48e440e4c92 h1:CklngWcLxJpoFeXlOsTo6Mv4eIaLOzVel4fv34izz+U=
|
||||||
|
github.com/gorilla/mux v1.6.3-0.20180807075256-e48e440e4c92/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||||
|
github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
|
||||||
|
github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
|
||||||
|
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||||
|
github.com/huandu/xstrings v1.2.1-0.20180906151751-8bbcf2f9ccb5 h1:b7TOn7NRoUPI/+R6AVc3Qq6Rq3G1ov8HnlzxjuNUfgk=
|
||||||
|
github.com/huandu/xstrings v1.2.1-0.20180906151751-8bbcf2f9ccb5/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
|
github.com/jpillora/scraper v0.0.0-20171020112404-0b5e5ce320ff h1:iHiedmdUKhFUsiIS4RrvV1HvLParxSBlbAmQw7pRCG0=
|
||||||
|
github.com/jpillora/scraper v0.0.0-20171020112404-0b5e5ce320ff/go.mod h1:UEXx1EnNmV5QzTQZpHspc6/hcNzMKX/6LYAatwoyexI=
|
||||||
|
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||||
|
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/levigross/grequests v0.0.0-20190908174114-253788527a1a h1:DGFy/362j92vQRE3ThU1yqg9TuJS8YJOSbQuB7BP9cA=
|
||||||
|
github.com/levigross/grequests v0.0.0-20190908174114-253788527a1a/go.mod h1:jVntzcUU+2BtVohZBQmSHWUmh8B55LCNfPhcNCIvvIg=
|
||||||
|
github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o=
|
||||||
|
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
|
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4=
|
||||||
|
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-sqlite3 v1.7.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
|
github.com/mattn/go-sqlite3 v1.9.1-0.20180926090220-0a88db3545c4 h1:w+lKHu/aaKSDCi5W7KqKACotyPVCXFdrB+SN5eTxwVM=
|
||||||
|
github.com/mattn/go-sqlite3 v1.9.1-0.20180926090220-0a88db3545c4/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
|
github.com/mitchellh/mapstructure v1.0.1-0.20180928013848-ce2ff0c13ce5 h1:xztkMrRiyr6vPZxSTi7HtZPMDWTQx+R7KrE1kKtqI64=
|
||||||
|
github.com/mitchellh/mapstructure v1.0.1-0.20180928013848-ce2ff0c13ce5/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
|
||||||
|
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||||
|
github.com/odwrtw/yifysubs v0.0.0-20180215170549-9ca769fb2f90 h1:kDkQUcuUQRhx/jIJtN0AFW5muMLgr8Z1ItbThnbIT8Q=
|
||||||
|
github.com/odwrtw/yifysubs v0.0.0-20180215170549-9ca769fb2f90/go.mod h1:9TPMeWCUplybvf4aCW6Hu3KwJian+oSeK6jZ597z9i4=
|
||||||
|
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
|
||||||
|
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||||
|
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
|
||||||
|
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||||
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.8.1-0.20181023235946-059132a15dd0 h1:TVdhkEP0WKajbywS5TEDWwuzCl9EqOcQ6b1ymfmx/6E=
|
||||||
|
github.com/pkg/errors v0.8.1-0.20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/rylio/ytdl v0.5.2-0.20180818171727-06f651094627 h1:n1x4E07Wxi/cnY+gXAPL6T9v7wATp4tHjNNVaWixqtQ=
|
||||||
|
github.com/rylio/ytdl v0.5.2-0.20180818171727-06f651094627/go.mod h1:95YUr8z28/4SbAtOMw027cd07GG2yt2cONPpSE7rUH4=
|
||||||
|
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||||
|
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||||
|
github.com/sirupsen/logrus v1.1.2-0.20181101075517-7eeb7b7cbdeb h1:Zs7k4rxIvfXFjav0ia3QqnauqGfG2hr8+y6NZYwq8iA=
|
||||||
|
github.com/sirupsen/logrus v1.1.2-0.20181101075517-7eeb7b7cbdeb/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180607162144-eb5b59917fa2 h1:hjkfjJKpNSPqJZJKSrHbQgBz+eEui8ivYlorRc9DR64=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180607162144-eb5b59917fa2/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo=
|
||||||
|
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||||
|
github.com/smartystreets/gunit v0.0.0-20180314194857-6f0d6275bdcd h1:p5kvxG4NHogJX1brTLtvUSGdW0/aBvIyqDSW7tmnsmQ=
|
||||||
|
github.com/smartystreets/gunit v0.0.0-20180314194857-6f0d6275bdcd/go.mod h1:XUKj4gbqj2QvJk/OdLWzyZ3FYli0f+MdpngyryX0gcw=
|
||||||
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
|
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
|
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
|
||||||
|
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
|
github.com/tinylib/msgp v1.0.3-0.20180912221530-f65876d3ea05 h1:ql7SaC+c7bXhc8CgsDV9Xo0lI5hDqfQ3vjAlO1NFLEE=
|
||||||
|
github.com/tinylib/msgp v1.0.3-0.20180912221530-f65876d3ea05/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
|
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||||
|
github.com/willf/bitset v1.1.9 h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c=
|
||||||
|
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||||
|
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
|
||||||
|
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
|
||||||
|
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
|
||||||
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b h1:IYiJPiJfzktmDAO1HQiwjMjwjlYKHAL7KzeD544RJPs=
|
||||||
|
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180724212812-e072cadbbdc8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20181030141323-6f44c5a2ea40 h1:BtUFROTRzj9Y8jXjyUJ4NogzXn+sSbruLbjIbdkd7VE=
|
||||||
|
golang.org/x/text v0.3.1-0.20181030141323-6f44c5a2ea40/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
|
||||||
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
gopkg.in/h2non/filetype.v1 v1.0.5 h1:CC1jjJjoEhNVbMhXYalmGBhOBK2V70Q1N850wt/98/Y=
|
||||||
|
gopkg.in/h2non/filetype.v1 v1.0.5/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo=
|
@ -2,18 +2,21 @@ package handler
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.adphi.net/Adphi/ytsflix/templates"
|
|
||||||
"git.adphi.net/Adphi/ytsflix/ytsclient"
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"gopkg.in/h2non/filetype.v1"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/dustin/go-humanize"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"gopkg.in/h2non/filetype.v1"
|
||||||
|
|
||||||
|
"git.adphi.net/adphi/ytsflix/engine"
|
||||||
|
"git.adphi.net/adphi/ytsflix/templates"
|
||||||
|
"git.adphi.net/adphi/ytsflix/ytsclient"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -21,9 +24,9 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Handler struct {
|
type Handler struct {
|
||||||
engine *Engine
|
engine *engine.Engine
|
||||||
clients map[string]int
|
clients map[string]int
|
||||||
yts *ytsclient.Client
|
yts ytsclient.Client
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
storagePath string
|
storagePath string
|
||||||
}
|
}
|
||||||
@ -57,11 +60,11 @@ type MovieData struct {
|
|||||||
func NewHandler(storagePath string) (*Handler, error) {
|
func NewHandler(storagePath string) (*Handler, error) {
|
||||||
h := Handler{storagePath: storagePath}
|
h := Handler{storagePath: storagePath}
|
||||||
var err error
|
var err error
|
||||||
h.engine, err = NewEngine(storagePath)
|
h.engine, err = engine.NewEngine(storagePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
h.yts = ytsclient.NewClient("https://yts.am/api/v2")
|
h.yts = ytsclient.NewClient()
|
||||||
h.clients = map[string]int{}
|
h.clients = map[string]int{}
|
||||||
return &h, nil
|
return &h, nil
|
||||||
}
|
}
|
||||||
@ -86,7 +89,7 @@ func (h *Handler) Home(writer http.ResponseWriter, request *http.Request) {
|
|||||||
Quality: ytsclient.Quality1080p,
|
Quality: ytsclient.Quality1080p,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
sendError(writer, err)
|
logrus.WithField("genre", genre).Error(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ms := map[string]Movie{}
|
ms := map[string]Movie{}
|
||||||
@ -233,7 +236,7 @@ func (h *Handler) Torrents(writer http.ResponseWriter, request *http.Request) {
|
|||||||
|
|
||||||
func (h *Handler) Serve(w http.ResponseWriter, r *http.Request) {
|
func (h *Handler) Serve(w http.ResponseWriter, r *http.Request) {
|
||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
var mt *MovieTorrent
|
var mt *engine.MovieTorrent
|
||||||
var err error
|
var err error
|
||||||
movie := vars["movie"]
|
movie := vars["movie"]
|
||||||
h.mutex.Lock()
|
h.mutex.Lock()
|
||||||
|
12
vendor/github.com/PuerkitoBio/goquery/LICENSE
generated
vendored
12
vendor/github.com/PuerkitoBio/goquery/LICENSE
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
Copyright (c) 2012-2016, Martin Angers & Contributors
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
177
vendor/github.com/PuerkitoBio/goquery/README.md
generated
vendored
177
vendor/github.com/PuerkitoBio/goquery/README.md
generated
vendored
@ -1,177 +0,0 @@
|
|||||||
# goquery - a little like that j-thing, only in Go
|
|
||||||
[![build status](https://secure.travis-ci.org/PuerkitoBio/goquery.svg?branch=master)](http://travis-ci.org/PuerkitoBio/goquery) [![GoDoc](https://godoc.org/github.com/PuerkitoBio/goquery?status.png)](http://godoc.org/github.com/PuerkitoBio/goquery) [![Sourcegraph Badge](https://sourcegraph.com/github.com/PuerkitoBio/goquery/-/badge.svg)](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
|
|
||||||
|
|
||||||
goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
|
|
||||||
|
|
||||||
Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
|
|
||||||
|
|
||||||
Syntax-wise, it is as close as possible to jQuery, with the same function names when possible, and that warm and fuzzy chainable interface. jQuery being the ultra-popular library that it is, I felt that writing a similar HTML-manipulating library was better to follow its API than to start anew (in the same spirit as Go's `fmt` package), even though some of its methods are less than intuitive (looking at you, [index()][index]...).
|
|
||||||
|
|
||||||
## Table of Contents
|
|
||||||
|
|
||||||
* [Installation](#installation)
|
|
||||||
* [Changelog](#changelog)
|
|
||||||
* [API](#api)
|
|
||||||
* [Examples](#examples)
|
|
||||||
* [Related Projects](#related-projects)
|
|
||||||
* [Support](#support)
|
|
||||||
* [License](#license)
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
Please note that because of the net/html dependency, goquery requires Go1.1+.
|
|
||||||
|
|
||||||
$ go get github.com/PuerkitoBio/goquery
|
|
||||||
|
|
||||||
(optional) To run unit tests:
|
|
||||||
|
|
||||||
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
|
|
||||||
$ go test
|
|
||||||
|
|
||||||
(optional) To run benchmarks (warning: it runs for a few minutes):
|
|
||||||
|
|
||||||
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
|
|
||||||
$ go test -bench=".*"
|
|
||||||
|
|
||||||
## Changelog
|
|
||||||
|
|
||||||
**Note that goquery's API is now stable, and will not break.**
|
|
||||||
|
|
||||||
* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
|
|
||||||
* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
|
|
||||||
* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
|
|
||||||
* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
|
|
||||||
* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
|
|
||||||
* **2016-12-29 (v1.0.2)** : Optimize allocations for `Selection.Text` (thanks to @radovskyb).
|
|
||||||
* **2016-08-28 (v1.0.1)** : Optimize performance for large documents.
|
|
||||||
* **2016-07-27 (v1.0.0)** : Tag version 1.0.0.
|
|
||||||
* **2016-06-15** : Invalid selector strings internally compile to a `Matcher` implementation that never matches any node (instead of a panic). So for example, `doc.Find("~")` returns an empty `*Selection` object.
|
|
||||||
* **2016-02-02** : Add `NodeName` utility function similar to the DOM's `nodeName` property. It returns the tag name of the first element in a selection, and other relevant values of non-element nodes (see godoc for details). Add `OuterHtml` utility function similar to the DOM's `outerHTML` property (named `OuterHtml` in small caps for consistency with the existing `Html` method on the `Selection`).
|
|
||||||
* **2015-04-20** : Add `AttrOr` helper method to return the attribute's value or a default value if absent. Thanks to [piotrkowalczuk][piotr].
|
|
||||||
* **2015-02-04** : Add more manipulation functions - Prepend* - thanks again to [Andrew Stone][thatguystone].
|
|
||||||
* **2014-11-28** : Add more manipulation functions - ReplaceWith*, Wrap* and Unwrap - thanks again to [Andrew Stone][thatguystone].
|
|
||||||
* **2014-11-07** : Add manipulation functions (thanks to [Andrew Stone][thatguystone]) and `*Matcher` functions, that receive compiled cascadia selectors instead of selector strings, thus avoiding potential panics thrown by goquery via `cascadia.MustCompile` calls. This results in better performance (selectors can be compiled once and reused) and more idiomatic error handling (you can handle cascadia's compilation errors, instead of recovering from panics, which had been bugging me for a long time). Note that the actual type expected is a `Matcher` interface, that `cascadia.Selector` implements. Other matcher implementations could be used.
|
|
||||||
* **2014-11-06** : Change import paths of net/html to golang.org/x/net/html (see https://groups.google.com/forum/#!topic/golang-nuts/eD8dh3T9yyA). Make sure to update your code to use the new import path too when you call goquery with `html.Node`s.
|
|
||||||
* **v0.3.2** : Add `NewDocumentFromReader()` (thanks jweir) which allows creating a goquery document from an io.Reader.
|
|
||||||
* **v0.3.1** : Add `NewDocumentFromResponse()` (thanks assassingj) which allows creating a goquery document from an http response.
|
|
||||||
* **v0.3.0** : Add `EachWithBreak()` which allows to break out of an `Each()` loop by returning false. This function was added instead of changing the existing `Each()` to avoid breaking compatibility.
|
|
||||||
* **v0.2.1** : Make go-getable, now that [go.net/html is Go1.0-compatible][gonet] (thanks to @matrixik for pointing this out).
|
|
||||||
* **v0.2.0** : Add support for negative indices in Slice(). **BREAKING CHANGE** `Document.Root` is removed, `Document` is now a `Selection` itself (a selection of one, the root element, just like `Document.Root` was before). Add jQuery's Closest() method.
|
|
||||||
* **v0.1.1** : Add benchmarks to use as baseline for refactorings, refactor Next...() and Prev...() methods to use the new html package's linked list features (Next/PrevSibling, FirstChild). Good performance boost (40+% in some cases).
|
|
||||||
* **v0.1.0** : Initial release.
|
|
||||||
|
|
||||||
## API
|
|
||||||
|
|
||||||
goquery exposes two structs, `Document` and `Selection`, and the `Matcher` interface. Unlike jQuery, which is loaded as part of a DOM document, and thus acts on its containing document, goquery doesn't know which HTML document to act upon. So it needs to be told, and that's what the `Document` type is for. It holds the root document node as the initial Selection value to manipulate.
|
|
||||||
|
|
||||||
jQuery often has many variants for the same function (no argument, a selector string argument, a jQuery object argument, a DOM element argument, ...). Instead of exposing the same features in goquery as a single method with variadic empty interface arguments, statically-typed signatures are used following this naming convention:
|
|
||||||
|
|
||||||
* When the jQuery equivalent can be called with no argument, it has the same name as jQuery for the no argument signature (e.g.: `Prev()`), and the version with a selector string argument is called `XxxFiltered()` (e.g.: `PrevFiltered()`)
|
|
||||||
* When the jQuery equivalent **requires** one argument, the same name as jQuery is used for the selector string version (e.g.: `Is()`)
|
|
||||||
* The signatures accepting a jQuery object as argument are defined in goquery as `XxxSelection()` and take a `*Selection` object as argument (e.g.: `FilterSelection()`)
|
|
||||||
* The signatures accepting a DOM element as argument in jQuery are defined in goquery as `XxxNodes()` and take a variadic argument of type `*html.Node` (e.g.: `FilterNodes()`)
|
|
||||||
* The signatures accepting a function as argument in jQuery are defined in goquery as `XxxFunction()` and take a function as argument (e.g.: `FilterFunction()`)
|
|
||||||
* The goquery methods that can be called with a selector string have a corresponding version that take a `Matcher` interface and are defined as `XxxMatcher()` (e.g.: `IsMatcher()`)
|
|
||||||
|
|
||||||
Utility functions that are not in jQuery but are useful in Go are implemented as functions (that take a `*Selection` as parameter), to avoid a potential naming clash on the `*Selection`'s methods (reserved for jQuery-equivalent behaviour).
|
|
||||||
|
|
||||||
The complete [godoc reference documentation can be found here][doc].
|
|
||||||
|
|
||||||
Please note that Cascadia's selectors do not necessarily match all supported selectors of jQuery (Sizzle). See the [cascadia project][cascadia] for details. Invalid selector strings compile to a `Matcher` that fails to match any node. Behaviour of the various functions that take a selector string as argument follows from that fact, e.g. (where `~` is an invalid selector string):
|
|
||||||
|
|
||||||
* `Find("~")` returns an empty selection because the selector string doesn't match anything.
|
|
||||||
* `Add("~")` returns a new selection that holds the same nodes as the original selection, because it didn't add any node (selector string didn't match anything).
|
|
||||||
* `ParentsFiltered("~")` returns an empty selection because the selector string doesn't match anything.
|
|
||||||
* `ParentsUntil("~")` returns all parents of the selection because the selector string didn't match any element to stop before the top element.
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
See some tips and tricks in the [wiki][].
|
|
||||||
|
|
||||||
Adapted from example_test.go:
|
|
||||||
|
|
||||||
```Go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/PuerkitoBio/goquery"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleScrape() {
|
|
||||||
// Request the HTML page.
|
|
||||||
res, err := http.Get("http://metalsucks.net")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load the HTML document
|
|
||||||
doc, err := goquery.NewDocumentFromReader(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the review items
|
|
||||||
doc.Find(".sidebar-reviews article .content-block").Each(func(i int, s *goquery.Selection) {
|
|
||||||
// For each item found, get the band and title
|
|
||||||
band := s.Find("a").Text()
|
|
||||||
title := s.Find("i").Text()
|
|
||||||
fmt.Printf("Review %d: %s - %s\n", i, band, title)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
ExampleScrape()
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Related Projects
|
|
||||||
|
|
||||||
- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
|
|
||||||
- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
|
|
||||||
- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
|
|
||||||
- [asciimoo/colly](https://github.com/asciimoo/colly), a lightning fast and elegant Scraping Framework
|
|
||||||
- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
|
|
||||||
|
|
||||||
## Support
|
|
||||||
|
|
||||||
There are a number of ways you can support the project:
|
|
||||||
|
|
||||||
* Use it, star it, build something with it, spread the word!
|
|
||||||
- If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
|
|
||||||
* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
|
|
||||||
- Please search existing issues before opening a new one - it may have already been adressed.
|
|
||||||
* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
|
|
||||||
- Make sure new code is tested.
|
|
||||||
- Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
|
|
||||||
|
|
||||||
If you desperately want to send money my way, I have a BuyMeACoffee.com page:
|
|
||||||
|
|
||||||
<a href="https://www.buymeacoffee.com/mna" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia's license is [here][caslic].
|
|
||||||
|
|
||||||
[jquery]: http://jquery.com/
|
|
||||||
[go]: http://golang.org/
|
|
||||||
[cascadia]: https://github.com/andybalholm/cascadia
|
|
||||||
[cascadiacli]: https://github.com/suntong/cascadia
|
|
||||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
|
||||||
[golic]: http://golang.org/LICENSE
|
|
||||||
[caslic]: https://github.com/andybalholm/cascadia/blob/master/LICENSE
|
|
||||||
[doc]: http://godoc.org/github.com/PuerkitoBio/goquery
|
|
||||||
[index]: http://api.jquery.com/index/
|
|
||||||
[gonet]: https://github.com/golang/net/
|
|
||||||
[html]: http://godoc.org/golang.org/x/net/html
|
|
||||||
[wiki]: https://github.com/PuerkitoBio/goquery/wiki/Tips-and-tricks
|
|
||||||
[thatguystone]: https://github.com/thatguystone
|
|
||||||
[piotr]: https://github.com/piotrkowalczuk
|
|
||||||
[goq]: https://github.com/andrewstuart/goq
|
|
124
vendor/github.com/PuerkitoBio/goquery/array.go
generated
vendored
124
vendor/github.com/PuerkitoBio/goquery/array.go
generated
vendored
@ -1,124 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxUint = ^uint(0)
|
|
||||||
maxInt = int(maxUint >> 1)
|
|
||||||
|
|
||||||
// ToEnd is a special index value that can be used as end index in a call
|
|
||||||
// to Slice so that all elements are selected until the end of the Selection.
|
|
||||||
// It is equivalent to passing (*Selection).Length().
|
|
||||||
ToEnd = maxInt
|
|
||||||
)
|
|
||||||
|
|
||||||
// First reduces the set of matched elements to the first in the set.
|
|
||||||
// It returns a new Selection object, and an empty Selection object if the
|
|
||||||
// the selection is empty.
|
|
||||||
func (s *Selection) First() *Selection {
|
|
||||||
return s.Eq(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Last reduces the set of matched elements to the last in the set.
|
|
||||||
// It returns a new Selection object, and an empty Selection object if
|
|
||||||
// the selection is empty.
|
|
||||||
func (s *Selection) Last() *Selection {
|
|
||||||
return s.Eq(-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Eq reduces the set of matched elements to the one at the specified index.
|
|
||||||
// If a negative index is given, it counts backwards starting at the end of the
|
|
||||||
// set. It returns a new Selection object, and an empty Selection object if the
|
|
||||||
// index is invalid.
|
|
||||||
func (s *Selection) Eq(index int) *Selection {
|
|
||||||
if index < 0 {
|
|
||||||
index += len(s.Nodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
if index >= len(s.Nodes) || index < 0 {
|
|
||||||
return newEmptySelection(s.document)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.Slice(index, index+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slice reduces the set of matched elements to a subset specified by a range
|
|
||||||
// of indices. The start index is 0-based and indicates the index of the first
|
|
||||||
// element to select. The end index is 0-based and indicates the index at which
|
|
||||||
// the elements stop being selected (the end index is not selected).
|
|
||||||
//
|
|
||||||
// The indices may be negative, in which case they represent an offset from the
|
|
||||||
// end of the selection.
|
|
||||||
//
|
|
||||||
// The special value ToEnd may be specified as end index, in which case all elements
|
|
||||||
// until the end are selected. This works both for a positive and negative start
|
|
||||||
// index.
|
|
||||||
func (s *Selection) Slice(start, end int) *Selection {
|
|
||||||
if start < 0 {
|
|
||||||
start += len(s.Nodes)
|
|
||||||
}
|
|
||||||
if end == ToEnd {
|
|
||||||
end = len(s.Nodes)
|
|
||||||
} else if end < 0 {
|
|
||||||
end += len(s.Nodes)
|
|
||||||
}
|
|
||||||
return pushStack(s, s.Nodes[start:end])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get retrieves the underlying node at the specified index.
|
|
||||||
// Get without parameter is not implemented, since the node array is available
|
|
||||||
// on the Selection object.
|
|
||||||
func (s *Selection) Get(index int) *html.Node {
|
|
||||||
if index < 0 {
|
|
||||||
index += len(s.Nodes) // Negative index gets from the end
|
|
||||||
}
|
|
||||||
return s.Nodes[index]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Index returns the position of the first element within the Selection object
|
|
||||||
// relative to its sibling elements.
|
|
||||||
func (s *Selection) Index() int {
|
|
||||||
if len(s.Nodes) > 0 {
|
|
||||||
return newSingleSelection(s.Nodes[0], s.document).PrevAll().Length()
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexSelector returns the position of the first element within the
|
|
||||||
// Selection object relative to the elements matched by the selector, or -1 if
|
|
||||||
// not found.
|
|
||||||
func (s *Selection) IndexSelector(selector string) int {
|
|
||||||
if len(s.Nodes) > 0 {
|
|
||||||
sel := s.document.Find(selector)
|
|
||||||
return indexInSlice(sel.Nodes, s.Nodes[0])
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexMatcher returns the position of the first element within the
|
|
||||||
// Selection object relative to the elements matched by the matcher, or -1 if
|
|
||||||
// not found.
|
|
||||||
func (s *Selection) IndexMatcher(m Matcher) int {
|
|
||||||
if len(s.Nodes) > 0 {
|
|
||||||
sel := s.document.FindMatcher(m)
|
|
||||||
return indexInSlice(sel.Nodes, s.Nodes[0])
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexOfNode returns the position of the specified node within the Selection
|
|
||||||
// object, or -1 if not found.
|
|
||||||
func (s *Selection) IndexOfNode(node *html.Node) int {
|
|
||||||
return indexInSlice(s.Nodes, node)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexOfSelection returns the position of the first node in the specified
|
|
||||||
// Selection object within this Selection object, or -1 if not found.
|
|
||||||
func (s *Selection) IndexOfSelection(sel *Selection) int {
|
|
||||||
if sel != nil && len(sel.Nodes) > 0 {
|
|
||||||
return indexInSlice(s.Nodes, sel.Nodes[0])
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
123
vendor/github.com/PuerkitoBio/goquery/doc.go
generated
vendored
123
vendor/github.com/PuerkitoBio/goquery/doc.go
generated
vendored
@ -1,123 +0,0 @@
|
|||||||
// Copyright (c) 2012-2016, Martin Angers & Contributors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
// are permitted provided that the following conditions are met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright notice,
|
|
||||||
// this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
// this list of conditions and the following disclaimer in the documentation and/or
|
|
||||||
// other materials provided with the distribution.
|
|
||||||
// * Neither the name of the author nor the names of its contributors may be used to
|
|
||||||
// endorse or promote products derived from this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
|
|
||||||
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
|
|
||||||
// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
|
|
||||||
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
||||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
|
||||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
|
|
||||||
// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package goquery implements features similar to jQuery, including the chainable
|
|
||||||
syntax, to manipulate and query an HTML document.
|
|
||||||
|
|
||||||
It brings a syntax and a set of features similar to jQuery to the Go language.
|
|
||||||
It is based on Go's net/html package and the CSS Selector library cascadia.
|
|
||||||
Since the net/html parser returns nodes, and not a full-featured DOM
|
|
||||||
tree, jQuery's stateful manipulation functions (like height(), css(), detach())
|
|
||||||
have been left off.
|
|
||||||
|
|
||||||
Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is
|
|
||||||
the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML.
|
|
||||||
See the repository's wiki for various options on how to do this.
|
|
||||||
|
|
||||||
Syntax-wise, it is as close as possible to jQuery, with the same method names when
|
|
||||||
possible, and that warm and fuzzy chainable interface. jQuery being the
|
|
||||||
ultra-popular library that it is, writing a similar HTML-manipulating
|
|
||||||
library was better to follow its API than to start anew (in the same spirit as
|
|
||||||
Go's fmt package), even though some of its methods are less than intuitive (looking
|
|
||||||
at you, index()...).
|
|
||||||
|
|
||||||
It is hosted on GitHub, along with additional documentation in the README.md
|
|
||||||
file: https://github.com/puerkitobio/goquery
|
|
||||||
|
|
||||||
Please note that because of the net/html dependency, goquery requires Go1.1+.
|
|
||||||
|
|
||||||
The various methods are split into files based on the category of behavior.
|
|
||||||
The three dots (...) indicate that various "overloads" are available.
|
|
||||||
|
|
||||||
* array.go : array-like positional manipulation of the selection.
|
|
||||||
- Eq()
|
|
||||||
- First()
|
|
||||||
- Get()
|
|
||||||
- Index...()
|
|
||||||
- Last()
|
|
||||||
- Slice()
|
|
||||||
|
|
||||||
* expand.go : methods that expand or augment the selection's set.
|
|
||||||
- Add...()
|
|
||||||
- AndSelf()
|
|
||||||
- Union(), which is an alias for AddSelection()
|
|
||||||
|
|
||||||
* filter.go : filtering methods, that reduce the selection's set.
|
|
||||||
- End()
|
|
||||||
- Filter...()
|
|
||||||
- Has...()
|
|
||||||
- Intersection(), which is an alias of FilterSelection()
|
|
||||||
- Not...()
|
|
||||||
|
|
||||||
* iteration.go : methods to loop over the selection's nodes.
|
|
||||||
- Each()
|
|
||||||
- EachWithBreak()
|
|
||||||
- Map()
|
|
||||||
|
|
||||||
* manipulation.go : methods for modifying the document
|
|
||||||
- After...()
|
|
||||||
- Append...()
|
|
||||||
- Before...()
|
|
||||||
- Clone()
|
|
||||||
- Empty()
|
|
||||||
- Prepend...()
|
|
||||||
- Remove...()
|
|
||||||
- ReplaceWith...()
|
|
||||||
- Unwrap()
|
|
||||||
- Wrap...()
|
|
||||||
- WrapAll...()
|
|
||||||
- WrapInner...()
|
|
||||||
|
|
||||||
* property.go : methods that inspect and get the node's properties values.
|
|
||||||
- Attr*(), RemoveAttr(), SetAttr()
|
|
||||||
- AddClass(), HasClass(), RemoveClass(), ToggleClass()
|
|
||||||
- Html()
|
|
||||||
- Length()
|
|
||||||
- Size(), which is an alias for Length()
|
|
||||||
- Text()
|
|
||||||
|
|
||||||
* query.go : methods that query, or reflect, a node's identity.
|
|
||||||
- Contains()
|
|
||||||
- Is...()
|
|
||||||
|
|
||||||
* traversal.go : methods to traverse the HTML document tree.
|
|
||||||
- Children...()
|
|
||||||
- Contents()
|
|
||||||
- Find...()
|
|
||||||
- Next...()
|
|
||||||
- Parent[s]...()
|
|
||||||
- Prev...()
|
|
||||||
- Siblings...()
|
|
||||||
|
|
||||||
* type.go : definition of the types exposed by goquery.
|
|
||||||
- Document
|
|
||||||
- Selection
|
|
||||||
- Matcher
|
|
||||||
|
|
||||||
* utilities.go : definition of helper functions (and not methods on a *Selection)
|
|
||||||
that are not part of jQuery, but are useful to goquery.
|
|
||||||
- NodeName
|
|
||||||
- OuterHtml
|
|
||||||
*/
|
|
||||||
package goquery
|
|
70
vendor/github.com/PuerkitoBio/goquery/expand.go
generated
vendored
70
vendor/github.com/PuerkitoBio/goquery/expand.go
generated
vendored
@ -1,70 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import "golang.org/x/net/html"
|
|
||||||
|
|
||||||
// Add adds the selector string's matching nodes to those in the current
|
|
||||||
// selection and returns a new Selection object.
|
|
||||||
// The selector string is run in the context of the document of the current
|
|
||||||
// Selection object.
|
|
||||||
func (s *Selection) Add(selector string) *Selection {
|
|
||||||
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddMatcher adds the matcher's matching nodes to those in the current
|
|
||||||
// selection and returns a new Selection object.
|
|
||||||
// The matcher is run in the context of the document of the current
|
|
||||||
// Selection object.
|
|
||||||
func (s *Selection) AddMatcher(m Matcher) *Selection {
|
|
||||||
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddSelection adds the specified Selection object's nodes to those in the
|
|
||||||
// current selection and returns a new Selection object.
|
|
||||||
func (s *Selection) AddSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.AddNodes()
|
|
||||||
}
|
|
||||||
return s.AddNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Union is an alias for AddSelection.
|
|
||||||
func (s *Selection) Union(sel *Selection) *Selection {
|
|
||||||
return s.AddSelection(sel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNodes adds the specified nodes to those in the
|
|
||||||
// current selection and returns a new Selection object.
|
|
||||||
func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AndSelf adds the previous set of elements on the stack to the current set.
|
|
||||||
// It returns a new Selection object containing the current Selection combined
|
|
||||||
// with the previous one.
|
|
||||||
// Deprecated: This function has been deprecated and is now an alias for AddBack().
|
|
||||||
func (s *Selection) AndSelf() *Selection {
|
|
||||||
return s.AddBack()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBack adds the previous set of elements on the stack to the current set.
|
|
||||||
// It returns a new Selection object containing the current Selection combined
|
|
||||||
// with the previous one.
|
|
||||||
func (s *Selection) AddBack() *Selection {
|
|
||||||
return s.AddSelection(s.prevSel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBackFiltered reduces the previous set of elements on the stack to those that
|
|
||||||
// match the selector string, and adds them to the current set.
|
|
||||||
// It returns a new Selection object containing the current Selection combined
|
|
||||||
// with the filtered previous one
|
|
||||||
func (s *Selection) AddBackFiltered(selector string) *Selection {
|
|
||||||
return s.AddSelection(s.prevSel.Filter(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBackMatcher reduces the previous set of elements on the stack to those that match
|
|
||||||
// the mateher, and adds them to the curernt set.
|
|
||||||
// It returns a new Selection object containing the current Selection combined
|
|
||||||
// with the filtered previous one
|
|
||||||
func (s *Selection) AddBackMatcher(m Matcher) *Selection {
|
|
||||||
return s.AddSelection(s.prevSel.FilterMatcher(m))
|
|
||||||
}
|
|
163
vendor/github.com/PuerkitoBio/goquery/filter.go
generated
vendored
163
vendor/github.com/PuerkitoBio/goquery/filter.go
generated
vendored
@ -1,163 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import "golang.org/x/net/html"
|
|
||||||
|
|
||||||
// Filter reduces the set of matched elements to those that match the selector string.
|
|
||||||
// It returns a new Selection object for this subset of matching elements.
|
|
||||||
func (s *Selection) Filter(selector string) *Selection {
|
|
||||||
return s.FilterMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterMatcher reduces the set of matched elements to those that match
|
|
||||||
// the given matcher. It returns a new Selection object for this subset
|
|
||||||
// of matching elements.
|
|
||||||
func (s *Selection) FilterMatcher(m Matcher) *Selection {
|
|
||||||
return pushStack(s, winnow(s, m, true))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Not removes elements from the Selection that match the selector string.
|
|
||||||
// It returns a new Selection object with the matching elements removed.
|
|
||||||
func (s *Selection) Not(selector string) *Selection {
|
|
||||||
return s.NotMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotMatcher removes elements from the Selection that match the given matcher.
|
|
||||||
// It returns a new Selection object with the matching elements removed.
|
|
||||||
func (s *Selection) NotMatcher(m Matcher) *Selection {
|
|
||||||
return pushStack(s, winnow(s, m, false))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterFunction reduces the set of matched elements to those that pass the function's test.
|
|
||||||
// It returns a new Selection object for this subset of elements.
|
|
||||||
func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
|
|
||||||
return pushStack(s, winnowFunction(s, f, true))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotFunction removes elements from the Selection that pass the function's test.
|
|
||||||
// It returns a new Selection object with the matching elements removed.
|
|
||||||
func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
|
|
||||||
return pushStack(s, winnowFunction(s, f, false))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterNodes reduces the set of matched elements to those that match the specified nodes.
|
|
||||||
// It returns a new Selection object for this subset of elements.
|
|
||||||
func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, winnowNodes(s, nodes, true))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotNodes removes elements from the Selection that match the specified nodes.
|
|
||||||
// It returns a new Selection object with the matching elements removed.
|
|
||||||
func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, winnowNodes(s, nodes, false))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterSelection reduces the set of matched elements to those that match a
|
|
||||||
// node in the specified Selection object.
|
|
||||||
// It returns a new Selection object for this subset of elements.
|
|
||||||
func (s *Selection) FilterSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return pushStack(s, winnowNodes(s, nil, true))
|
|
||||||
}
|
|
||||||
return pushStack(s, winnowNodes(s, sel.Nodes, true))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotSelection removes elements from the Selection that match a node in the specified
|
|
||||||
// Selection object. It returns a new Selection object with the matching elements removed.
|
|
||||||
func (s *Selection) NotSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return pushStack(s, winnowNodes(s, nil, false))
|
|
||||||
}
|
|
||||||
return pushStack(s, winnowNodes(s, sel.Nodes, false))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intersection is an alias for FilterSelection.
|
|
||||||
func (s *Selection) Intersection(sel *Selection) *Selection {
|
|
||||||
return s.FilterSelection(sel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has reduces the set of matched elements to those that have a descendant
|
|
||||||
// that matches the selector.
|
|
||||||
// It returns a new Selection object with the matching elements.
|
|
||||||
func (s *Selection) Has(selector string) *Selection {
|
|
||||||
return s.HasSelection(s.document.Find(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasMatcher reduces the set of matched elements to those that have a descendant
|
|
||||||
// that matches the matcher.
|
|
||||||
// It returns a new Selection object with the matching elements.
|
|
||||||
func (s *Selection) HasMatcher(m Matcher) *Selection {
|
|
||||||
return s.HasSelection(s.document.FindMatcher(m))
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasNodes reduces the set of matched elements to those that have a
|
|
||||||
// descendant that matches one of the nodes.
|
|
||||||
// It returns a new Selection object with the matching elements.
|
|
||||||
func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return s.FilterFunction(func(_ int, sel *Selection) bool {
|
|
||||||
// Add all nodes that contain one of the specified nodes
|
|
||||||
for _, n := range nodes {
|
|
||||||
if sel.Contains(n) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasSelection reduces the set of matched elements to those that have a
|
|
||||||
// descendant that matches one of the nodes of the specified Selection object.
|
|
||||||
// It returns a new Selection object with the matching elements.
|
|
||||||
func (s *Selection) HasSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.HasNodes()
|
|
||||||
}
|
|
||||||
return s.HasNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// End ends the most recent filtering operation in the current chain and
|
|
||||||
// returns the set of matched elements to its previous state.
|
|
||||||
func (s *Selection) End() *Selection {
|
|
||||||
if s.prevSel != nil {
|
|
||||||
return s.prevSel
|
|
||||||
}
|
|
||||||
return newEmptySelection(s.document)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter based on the matcher, and the indicator to keep (Filter) or
|
|
||||||
// to get rid of (Not) the matching elements.
|
|
||||||
func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
|
|
||||||
// Optimize if keep is requested
|
|
||||||
if keep {
|
|
||||||
return m.Filter(sel.Nodes)
|
|
||||||
}
|
|
||||||
// Use grep
|
|
||||||
return grep(sel, func(i int, s *Selection) bool {
|
|
||||||
return !m.Match(s.Get(0))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter based on an array of nodes, and the indicator to keep (Filter) or
|
|
||||||
// to get rid of (Not) the matching elements.
|
|
||||||
func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
|
|
||||||
if len(nodes)+len(sel.Nodes) < minNodesForSet {
|
|
||||||
return grep(sel, func(i int, s *Selection) bool {
|
|
||||||
return isInSlice(nodes, s.Get(0)) == keep
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
set := make(map[*html.Node]bool)
|
|
||||||
for _, n := range nodes {
|
|
||||||
set[n] = true
|
|
||||||
}
|
|
||||||
return grep(sel, func(i int, s *Selection) bool {
|
|
||||||
return set[s.Get(0)] == keep
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter based on a function test, and the indicator to keep (Filter) or
|
|
||||||
// to get rid of (Not) the matching elements.
|
|
||||||
func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
|
|
||||||
return grep(sel, func(i int, s *Selection) bool {
|
|
||||||
return f(i, s) == keep
|
|
||||||
})
|
|
||||||
}
|
|
39
vendor/github.com/PuerkitoBio/goquery/iteration.go
generated
vendored
39
vendor/github.com/PuerkitoBio/goquery/iteration.go
generated
vendored
@ -1,39 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
// Each iterates over a Selection object, executing a function for each
|
|
||||||
// matched element. It returns the current Selection object. The function
|
|
||||||
// f is called for each element in the selection with the index of the
|
|
||||||
// element in that selection starting at 0, and a *Selection that contains
|
|
||||||
// only that element.
|
|
||||||
func (s *Selection) Each(f func(int, *Selection)) *Selection {
|
|
||||||
for i, n := range s.Nodes {
|
|
||||||
f(i, newSingleSelection(n, s.document))
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// EachWithBreak iterates over a Selection object, executing a function for each
|
|
||||||
// matched element. It is identical to Each except that it is possible to break
|
|
||||||
// out of the loop by returning false in the callback function. It returns the
|
|
||||||
// current Selection object.
|
|
||||||
func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
|
|
||||||
for i, n := range s.Nodes {
|
|
||||||
if !f(i, newSingleSelection(n, s.document)) {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map passes each element in the current matched set through a function,
|
|
||||||
// producing a slice of string holding the returned values. The function
|
|
||||||
// f is called for each element in the selection with the index of the
|
|
||||||
// element in that selection starting at 0, and a *Selection that contains
|
|
||||||
// only that element.
|
|
||||||
func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
|
|
||||||
for i, n := range s.Nodes {
|
|
||||||
result = append(result, f(i, newSingleSelection(n, s.document)))
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
574
vendor/github.com/PuerkitoBio/goquery/manipulation.go
generated
vendored
574
vendor/github.com/PuerkitoBio/goquery/manipulation.go
generated
vendored
@ -1,574 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// After applies the selector from the root document and inserts the matched elements
|
|
||||||
// after the elements in the set of matched elements.
|
|
||||||
//
|
|
||||||
// If one of the matched elements in the selection is not currently in the
|
|
||||||
// document, it's impossible to insert nodes after it, so it will be ignored.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) After(selector string) *Selection {
|
|
||||||
return s.AfterMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AfterMatcher applies the matcher from the root document and inserts the matched elements
|
|
||||||
// after the elements in the set of matched elements.
|
|
||||||
//
|
|
||||||
// If one of the matched elements in the selection is not currently in the
|
|
||||||
// document, it's impossible to insert nodes after it, so it will be ignored.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AfterMatcher(m Matcher) *Selection {
|
|
||||||
return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AfterSelection inserts the elements in the selection after each element in the set of matched
|
|
||||||
// elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AfterSelection(sel *Selection) *Selection {
|
|
||||||
return s.AfterNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AfterHtml parses the html and inserts it after the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AfterHtml(html string) *Selection {
|
|
||||||
return s.AfterNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AfterNodes inserts the nodes after each element in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
|
|
||||||
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
|
|
||||||
if sn.Parent != nil {
|
|
||||||
sn.Parent.InsertBefore(n, sn.NextSibling)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append appends the elements specified by the selector to the end of each element
|
|
||||||
// in the set of matched elements, following those rules:
|
|
||||||
//
|
|
||||||
// 1) The selector is applied to the root document.
|
|
||||||
//
|
|
||||||
// 2) Elements that are part of the document will be moved to the new location.
|
|
||||||
//
|
|
||||||
// 3) If there are multiple locations to append to, cloned nodes will be
|
|
||||||
// appended to all target locations except the last one, which will be moved
|
|
||||||
// as noted in (2).
|
|
||||||
func (s *Selection) Append(selector string) *Selection {
|
|
||||||
return s.AppendMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendMatcher appends the elements specified by the matcher to the end of each element
|
|
||||||
// in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AppendMatcher(m Matcher) *Selection {
|
|
||||||
return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendSelection appends the elements in the selection to the end of each element
|
|
||||||
// in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AppendSelection(sel *Selection) *Selection {
|
|
||||||
return s.AppendNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendHtml parses the html and appends it to the set of matched elements.
|
|
||||||
func (s *Selection) AppendHtml(html string) *Selection {
|
|
||||||
return s.AppendNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendNodes appends the specified nodes to each node in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
|
|
||||||
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
|
|
||||||
sn.AppendChild(n)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Before inserts the matched elements before each element in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) Before(selector string) *Selection {
|
|
||||||
return s.BeforeMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) BeforeMatcher(m Matcher) *Selection {
|
|
||||||
return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeforeSelection inserts the elements in the selection before each element in the set of matched
|
|
||||||
// elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) BeforeSelection(sel *Selection) *Selection {
|
|
||||||
return s.BeforeNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeforeHtml parses the html and inserts it before the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) BeforeHtml(html string) *Selection {
|
|
||||||
return s.BeforeNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeforeNodes inserts the nodes before each element in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
|
|
||||||
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
|
|
||||||
if sn.Parent != nil {
|
|
||||||
sn.Parent.InsertBefore(n, sn)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
|
|
||||||
// attached to the document.
|
|
||||||
func (s *Selection) Clone() *Selection {
|
|
||||||
ns := newEmptySelection(s.document)
|
|
||||||
ns.Nodes = cloneNodes(s.Nodes)
|
|
||||||
return ns
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty removes all children nodes from the set of matched elements.
|
|
||||||
// It returns the children nodes in a new Selection.
|
|
||||||
func (s *Selection) Empty() *Selection {
|
|
||||||
var nodes []*html.Node
|
|
||||||
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
for c := n.FirstChild; c != nil; c = n.FirstChild {
|
|
||||||
n.RemoveChild(c)
|
|
||||||
nodes = append(nodes, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return pushStack(s, nodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepend prepends the elements specified by the selector to each element in
|
|
||||||
// the set of matched elements, following the same rules as Append.
|
|
||||||
func (s *Selection) Prepend(selector string) *Selection {
|
|
||||||
return s.PrependMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrependMatcher prepends the elements specified by the matcher to each
|
|
||||||
// element in the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) PrependMatcher(m Matcher) *Selection {
|
|
||||||
return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrependSelection prepends the elements in the selection to each element in
|
|
||||||
// the set of matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) PrependSelection(sel *Selection) *Selection {
|
|
||||||
return s.PrependNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrependHtml parses the html and prepends it to the set of matched elements.
|
|
||||||
func (s *Selection) PrependHtml(html string) *Selection {
|
|
||||||
return s.PrependNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrependNodes prepends the specified nodes to each node in the set of
|
|
||||||
// matched elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
|
|
||||||
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
|
|
||||||
// sn.FirstChild may be nil, in which case this functions like
|
|
||||||
// sn.AppendChild()
|
|
||||||
sn.InsertBefore(n, sn.FirstChild)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the set of matched elements from the document.
|
|
||||||
// It returns the same selection, now consisting of nodes not in the document.
|
|
||||||
func (s *Selection) Remove() *Selection {
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
if n.Parent != nil {
|
|
||||||
n.Parent.RemoveChild(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveFiltered removes the set of matched elements by selector.
|
|
||||||
// It returns the Selection of removed nodes.
|
|
||||||
func (s *Selection) RemoveFiltered(selector string) *Selection {
|
|
||||||
return s.RemoveMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveMatcher removes the set of matched elements.
|
|
||||||
// It returns the Selection of removed nodes.
|
|
||||||
func (s *Selection) RemoveMatcher(m Matcher) *Selection {
|
|
||||||
return s.FilterMatcher(m).Remove()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceWith replaces each element in the set of matched elements with the
|
|
||||||
// nodes matched by the given selector.
|
|
||||||
// It returns the removed elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) ReplaceWith(selector string) *Selection {
|
|
||||||
return s.ReplaceWithMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceWithMatcher replaces each element in the set of matched elements with
|
|
||||||
// the nodes matched by the given Matcher.
|
|
||||||
// It returns the removed elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
|
|
||||||
return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceWithSelection replaces each element in the set of matched elements with
|
|
||||||
// the nodes from the given Selection.
|
|
||||||
// It returns the removed elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
|
|
||||||
return s.ReplaceWithNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceWithHtml replaces each element in the set of matched elements with
|
|
||||||
// the parsed HTML.
|
|
||||||
// It returns the removed elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) ReplaceWithHtml(html string) *Selection {
|
|
||||||
return s.ReplaceWithNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceWithNodes replaces each element in the set of matched elements with
|
|
||||||
// the given nodes.
|
|
||||||
// It returns the removed elements.
|
|
||||||
//
|
|
||||||
// This follows the same rules as Selection.Append.
|
|
||||||
func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
|
|
||||||
s.AfterNodes(ns...)
|
|
||||||
return s.Remove()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHtml sets the html content of each element in the selection to
|
|
||||||
// specified html string.
|
|
||||||
func (s *Selection) SetHtml(html string) *Selection {
|
|
||||||
return setHtmlNodes(s, parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetText sets the content of each element in the selection to specified content.
|
|
||||||
// The provided text string is escaped.
|
|
||||||
func (s *Selection) SetText(text string) *Selection {
|
|
||||||
return s.SetHtml(html.EscapeString(text))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap removes the parents of the set of matched elements, leaving the matched
|
|
||||||
// elements (and their siblings, if any) in their place.
|
|
||||||
// It returns the original selection.
|
|
||||||
func (s *Selection) Unwrap() *Selection {
|
|
||||||
s.Parent().Each(func(i int, ss *Selection) {
|
|
||||||
// For some reason, jquery allows unwrap to remove the <head> element, so
|
|
||||||
// allowing it here too. Same for <html>. Why it allows those elements to
|
|
||||||
// be unwrapped while not allowing body is a mystery to me.
|
|
||||||
if ss.Nodes[0].Data != "body" {
|
|
||||||
ss.ReplaceWithSelection(ss.Contents())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap wraps each element in the set of matched elements inside the first
|
|
||||||
// element matched by the given selector. The matched child is cloned before
|
|
||||||
// being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) Wrap(selector string) *Selection {
|
|
||||||
return s.WrapMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapMatcher wraps each element in the set of matched elements inside the
|
|
||||||
// first element matched by the given matcher. The matched child is cloned
|
|
||||||
// before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapMatcher(m Matcher) *Selection {
|
|
||||||
return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapSelection wraps each element in the set of matched elements inside the
|
|
||||||
// first element in the given Selection. The element is cloned before being
|
|
||||||
// inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapSelection(sel *Selection) *Selection {
|
|
||||||
return s.wrapNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapHtml wraps each element in the set of matched elements inside the inner-
|
|
||||||
// most child of the given HTML.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapHtml(html string) *Selection {
|
|
||||||
return s.wrapNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapNode wraps each element in the set of matched elements inside the inner-
|
|
||||||
// most child of the given node. The given node is copied before being inserted
|
|
||||||
// into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapNode(n *html.Node) *Selection {
|
|
||||||
return s.wrapNodes(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
|
|
||||||
s.Each(func(i int, ss *Selection) {
|
|
||||||
ss.wrapAllNodes(ns...)
|
|
||||||
})
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapAll wraps a single HTML structure, matched by the given selector, around
|
|
||||||
// all elements in the set of matched elements. The matched child is cloned
|
|
||||||
// before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapAll(selector string) *Selection {
|
|
||||||
return s.WrapAllMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
|
|
||||||
// around all elements in the set of matched elements. The matched child is
|
|
||||||
// cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
|
|
||||||
return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapAllSelection wraps a single HTML structure, the first node of the given
|
|
||||||
// Selection, around all elements in the set of matched elements. The matched
|
|
||||||
// child is cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
|
|
||||||
return s.wrapAllNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapAllHtml wraps the given HTML structure around all elements in the set of
|
|
||||||
// matched elements. The matched child is cloned before being inserted into the
|
|
||||||
// document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapAllHtml(html string) *Selection {
|
|
||||||
return s.wrapAllNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
|
|
||||||
if len(ns) > 0 {
|
|
||||||
return s.WrapAllNode(ns[0])
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapAllNode wraps the given node around the first element in the Selection,
|
|
||||||
// making all other nodes in the Selection children of the given node. The node
|
|
||||||
// is cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapAllNode(n *html.Node) *Selection {
|
|
||||||
if s.Size() == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
wrap := cloneNode(n)
|
|
||||||
|
|
||||||
first := s.Nodes[0]
|
|
||||||
if first.Parent != nil {
|
|
||||||
first.Parent.InsertBefore(wrap, first)
|
|
||||||
first.Parent.RemoveChild(first)
|
|
||||||
}
|
|
||||||
|
|
||||||
for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
|
|
||||||
wrap = c
|
|
||||||
}
|
|
||||||
|
|
||||||
newSingleSelection(wrap, s.document).AppendSelection(s)
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapInner wraps an HTML structure, matched by the given selector, around the
|
|
||||||
// content of element in the set of matched elements. The matched child is
|
|
||||||
// cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapInner(selector string) *Selection {
|
|
||||||
return s.WrapInnerMatcher(compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapInnerMatcher wraps an HTML structure, matched by the given selector,
|
|
||||||
// around the content of element in the set of matched elements. The matched
|
|
||||||
// child is cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
|
|
||||||
return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapInnerSelection wraps an HTML structure, matched by the given selector,
|
|
||||||
// around the content of element in the set of matched elements. The matched
|
|
||||||
// child is cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
|
|
||||||
return s.wrapInnerNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapInnerHtml wraps an HTML structure, matched by the given selector, around
|
|
||||||
// the content of element in the set of matched elements. The matched child is
|
|
||||||
// cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapInnerHtml(html string) *Selection {
|
|
||||||
return s.wrapInnerNodes(parseHtml(html)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapInnerNode wraps an HTML structure, matched by the given selector, around
|
|
||||||
// the content of element in the set of matched elements. The matched child is
|
|
||||||
// cloned before being inserted into the document.
|
|
||||||
//
|
|
||||||
// It returns the original set of elements.
|
|
||||||
func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
|
|
||||||
return s.wrapInnerNodes(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
|
|
||||||
if len(ns) == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Each(func(i int, s *Selection) {
|
|
||||||
contents := s.Contents()
|
|
||||||
|
|
||||||
if contents.Size() > 0 {
|
|
||||||
contents.wrapAllNodes(ns...)
|
|
||||||
} else {
|
|
||||||
s.AppendNodes(cloneNode(ns[0]))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseHtml parses the given HTML fragment in the context of a generic
// element node and returns the resulting nodes. It panics on a parse error,
// which in practice cannot happen with an in-memory reader.
func parseHtml(h string) []*html.Node {
	// Errors are only returned when the io.Reader returns any error besides
	// EOF, but strings.Reader never will
	nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
	if err != nil {
		panic("goquery: failed to parse HTML: " + err.Error())
	}
	return nodes
}
|
|
||||||
|
|
||||||
func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
for c := n.FirstChild; c != nil; c = n.FirstChild {
|
|
||||||
n.RemoveChild(c)
|
|
||||||
}
|
|
||||||
for _, c := range ns {
|
|
||||||
n.AppendChild(cloneNode(c))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the first child that is an ElementNode
|
|
||||||
func getFirstChildEl(n *html.Node) *html.Node {
|
|
||||||
c := n.FirstChild
|
|
||||||
for c != nil && c.Type != html.ElementNode {
|
|
||||||
c = c.NextSibling
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deep copy a slice of nodes.
|
|
||||||
func cloneNodes(ns []*html.Node) []*html.Node {
|
|
||||||
cns := make([]*html.Node, 0, len(ns))
|
|
||||||
|
|
||||||
for _, n := range ns {
|
|
||||||
cns = append(cns, cloneNode(n))
|
|
||||||
}
|
|
||||||
|
|
||||||
return cns
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deep copy a node. The new node has clones of all the original node's
|
|
||||||
// children but none of its parents or siblings.
|
|
||||||
func cloneNode(n *html.Node) *html.Node {
|
|
||||||
nn := &html.Node{
|
|
||||||
Type: n.Type,
|
|
||||||
DataAtom: n.DataAtom,
|
|
||||||
Data: n.Data,
|
|
||||||
Attr: make([]html.Attribute, len(n.Attr)),
|
|
||||||
}
|
|
||||||
|
|
||||||
copy(nn.Attr, n.Attr)
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
nn.AppendChild(cloneNode(c))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nn
|
|
||||||
}
|
|
||||||
|
|
||||||
// manipulateNodes applies the callback f to every (selection node, insertion
// node) pair. The inserted node is cloned for every selection node except the
// last one, which receives the original node (detached from any parent first)
// so that no unnecessary copy is made.
func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
	f func(sn *html.Node, n *html.Node)) *Selection {

	lasti := s.Size() - 1

	// net.Html doesn't provide document fragments for insertion, so to get
	// things in the correct order with After() and Prepend(), the callback
	// needs to be called on the reverse of the nodes.
	if reverse {
		// In-place reversal; note this mutates the caller's slice.
		for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
			ns[i], ns[j] = ns[j], ns[i]
		}
	}

	for i, sn := range s.Nodes {
		for _, n := range ns {
			if i != lasti {
				// Not the last target: insert a clone so the original is
				// still available for the remaining targets.
				f(sn, cloneNode(n))
			} else {
				// Last target: move the original node itself.
				if n.Parent != nil {
					n.Parent.RemoveChild(n)
				}
				f(sn, n)
			}
		}
	}

	return s
}
|
|
275
vendor/github.com/PuerkitoBio/goquery/property.go
generated
vendored
275
vendor/github.com/PuerkitoBio/goquery/property.go
generated
vendored
@ -1,275 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rxClassTrim matches the whitespace characters that are normalized to plain
// spaces when reading or writing the "class" attribute.
var rxClassTrim = regexp.MustCompile("[\t\r\n]")
|
|
||||||
|
|
||||||
// Attr gets the specified attribute's value for the first element in the
// Selection. To get the value for each element individually, use a looping
// construct such as Each or Map method.
func (s *Selection) Attr(attrName string) (val string, exists bool) {
	// Empty selection: return the zero values ("", false).
	if len(s.Nodes) == 0 {
		return
	}
	return getAttributeValue(attrName, s.Nodes[0])
}
|
|
||||||
|
|
||||||
// AttrOr works like Attr but returns default value if attribute is not present.
|
|
||||||
func (s *Selection) AttrOr(attrName, defaultValue string) string {
|
|
||||||
if len(s.Nodes) == 0 {
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
val, exists := getAttributeValue(attrName, s.Nodes[0])
|
|
||||||
if !exists {
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveAttr removes the named attribute from each element in the set of matched elements.
// It returns the original Selection to allow chaining.
func (s *Selection) RemoveAttr(attrName string) *Selection {
	for _, n := range s.Nodes {
		removeAttr(n, attrName)
	}

	return s
}
|
|
||||||
|
|
||||||
// SetAttr sets the given attribute on each element in the set of matched elements.
|
|
||||||
func (s *Selection) SetAttr(attrName, val string) *Selection {
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
attr := getAttributePtr(attrName, n)
|
|
||||||
if attr == nil {
|
|
||||||
n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
|
|
||||||
} else {
|
|
||||||
attr.Val = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Text gets the combined text contents of each element in the set of matched
|
|
||||||
// elements, including their descendants.
|
|
||||||
func (s *Selection) Text() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
// Slightly optimized vs calling Each: no single selection object created
|
|
||||||
var f func(*html.Node)
|
|
||||||
f = func(n *html.Node) {
|
|
||||||
if n.Type == html.TextNode {
|
|
||||||
// Keep newlines and spaces, like jQuery
|
|
||||||
buf.WriteString(n.Data)
|
|
||||||
}
|
|
||||||
if n.FirstChild != nil {
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
f(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
f(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size is an alias for Length.
func (s *Selection) Size() int {
	return s.Length()
}

// Length returns the number of elements in the Selection object.
func (s *Selection) Length() int {
	return len(s.Nodes)
}
|
|
||||||
|
|
||||||
// Html gets the HTML contents of the first element in the set of matched
|
|
||||||
// elements. It includes text and comment nodes.
|
|
||||||
func (s *Selection) Html() (ret string, e error) {
|
|
||||||
// Since there is no .innerHtml, the HTML content must be re-created from
|
|
||||||
// the nodes using html.Render.
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
if len(s.Nodes) > 0 {
|
|
||||||
for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
e = html.Render(&buf, c)
|
|
||||||
if e != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ret = buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddClass adds the given class(es) to each element in the set of matched elements.
|
|
||||||
// Multiple class names can be specified, separated by a space or via multiple arguments.
|
|
||||||
func (s *Selection) AddClass(class ...string) *Selection {
|
|
||||||
classStr := strings.TrimSpace(strings.Join(class, " "))
|
|
||||||
|
|
||||||
if classStr == "" {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
tcls := getClassesSlice(classStr)
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
curClasses, attr := getClassesAndAttr(n, true)
|
|
||||||
for _, newClass := range tcls {
|
|
||||||
if !strings.Contains(curClasses, " "+newClass+" ") {
|
|
||||||
curClasses += newClass + " "
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
setClasses(n, attr, curClasses)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasClass determines whether any of the matched elements are assigned the
|
|
||||||
// given class.
|
|
||||||
func (s *Selection) HasClass(class string) bool {
|
|
||||||
class = " " + class + " "
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
classes, _ := getClassesAndAttr(n, false)
|
|
||||||
if strings.Contains(classes, class) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveClass removes the given class(es) from each element in the set of matched elements.
|
|
||||||
// Multiple class names can be specified, separated by a space or via multiple arguments.
|
|
||||||
// If no class name is provided, all classes are removed.
|
|
||||||
func (s *Selection) RemoveClass(class ...string) *Selection {
|
|
||||||
var rclasses []string
|
|
||||||
|
|
||||||
classStr := strings.TrimSpace(strings.Join(class, " "))
|
|
||||||
remove := classStr == ""
|
|
||||||
|
|
||||||
if !remove {
|
|
||||||
rclasses = getClassesSlice(classStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
if remove {
|
|
||||||
removeAttr(n, "class")
|
|
||||||
} else {
|
|
||||||
classes, attr := getClassesAndAttr(n, true)
|
|
||||||
for _, rcl := range rclasses {
|
|
||||||
classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
setClasses(n, attr, classes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
|
|
||||||
// Multiple class names can be specified, separated by a space or via multiple arguments.
|
|
||||||
func (s *Selection) ToggleClass(class ...string) *Selection {
|
|
||||||
classStr := strings.TrimSpace(strings.Join(class, " "))
|
|
||||||
|
|
||||||
if classStr == "" {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
tcls := getClassesSlice(classStr)
|
|
||||||
|
|
||||||
for _, n := range s.Nodes {
|
|
||||||
classes, attr := getClassesAndAttr(n, true)
|
|
||||||
for _, tcl := range tcls {
|
|
||||||
if strings.Contains(classes, " "+tcl+" ") {
|
|
||||||
classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
|
|
||||||
} else {
|
|
||||||
classes += tcl + " "
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
setClasses(n, attr, classes)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
|
|
||||||
if n == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, a := range n.Attr {
|
|
||||||
if a.Key == attrName {
|
|
||||||
return &n.Attr[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Private function to get the specified attribute's value from a node.
|
|
||||||
func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
|
|
||||||
if a := getAttributePtr(attrName, n); a != nil {
|
|
||||||
val = a.Val
|
|
||||||
exists = true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getClassesAndAttr gets and normalizes the "class" attribute from the node.
// The returned string is the class list wrapped in, and separated by, single
// spaces, so callers can test membership with " name ". When create is true
// and an element node has no class attribute, an empty one is appended and a
// pointer to it is returned; the pointer is only valid until n.Attr is next
// reallocated.
func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
	// Applies only to element nodes
	if n.Type == html.ElementNode {
		attr = getAttributePtr("class", n)
		if attr == nil && create {
			n.Attr = append(n.Attr, html.Attribute{
				Key: "class",
				Val: "",
			})
			// Point at the freshly appended attribute.
			attr = &n.Attr[len(n.Attr)-1]
		}
	}

	if attr == nil {
		classes = " "
	} else {
		// Collapse tabs/CR/LF so the list is strictly space-separated.
		classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
	}

	return
}
|
|
||||||
|
|
||||||
// getClassesSlice splits a whitespace-separated class string into its
// individual tokens (including empty boundary tokens) after normalizing
// tabs/CR/LF to spaces.
func getClassesSlice(classes string) []string {
	return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
}
|
|
||||||
|
|
||||||
func removeAttr(n *html.Node, attrName string) {
|
|
||||||
for i, a := range n.Attr {
|
|
||||||
if a.Key == attrName {
|
|
||||||
n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
|
|
||||||
n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// setClasses writes the normalized class list back to the node's class
// attribute, removing the attribute entirely when the list is empty.
func setClasses(n *html.Node, attr *html.Attribute, classes string) {
	classes = strings.TrimSpace(classes)
	if classes == "" {
		removeAttr(n, "class")
		return
	}

	attr.Val = classes
}
|
|
49
vendor/github.com/PuerkitoBio/goquery/query.go
generated
vendored
49
vendor/github.com/PuerkitoBio/goquery/query.go
generated
vendored
@ -1,49 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import "golang.org/x/net/html"
|
|
||||||
|
|
||||||
// Is checks the current matched set of elements against a selector and
// returns true if at least one of these elements matches.
func (s *Selection) Is(selector string) bool {
	return s.IsMatcher(compileMatcher(selector))
}
|
|
||||||
|
|
||||||
// IsMatcher checks the current matched set of elements against a matcher and
|
|
||||||
// returns true if at least one of these elements matches.
|
|
||||||
func (s *Selection) IsMatcher(m Matcher) bool {
|
|
||||||
if len(s.Nodes) > 0 {
|
|
||||||
if len(s.Nodes) == 1 {
|
|
||||||
return m.Match(s.Nodes[0])
|
|
||||||
}
|
|
||||||
return len(m.Filter(s.Nodes)) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsFunction checks the current matched set of elements against a predicate and
// returns true if at least one of these elements matches.
func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
	return s.FilterFunction(f).Length() > 0
}

// IsSelection checks the current matched set of elements against a Selection object
// and returns true if at least one of these elements matches.
func (s *Selection) IsSelection(sel *Selection) bool {
	return s.FilterSelection(sel).Length() > 0
}

// IsNodes checks the current matched set of elements against the specified nodes
// and returns true if at least one of these elements matches.
func (s *Selection) IsNodes(nodes ...*html.Node) bool {
	return s.FilterNodes(nodes...).Length() > 0
}

// Contains returns true if the specified Node is within,
// at any depth, one of the nodes in the Selection object.
// It is NOT inclusive, to behave like jQuery's implementation, and
// unlike Javascript's .contains, so if the contained
// node is itself in the selection, it returns false.
func (s *Selection) Contains(n *html.Node) bool {
	return sliceContains(s.Nodes, n)
}
|
|
698
vendor/github.com/PuerkitoBio/goquery/traversal.go
generated
vendored
698
vendor/github.com/PuerkitoBio/goquery/traversal.go
generated
vendored
@ -1,698 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import "golang.org/x/net/html"
|
|
||||||
|
|
||||||
// siblingType is used internally when iterating over children at the same
// level (siblings) to specify which nodes are requested.
type siblingType int

// Sibling type, used internally when iterating over children at the same
// level (siblings) to specify which nodes are requested.
const (
	siblingPrevUntil siblingType = iota - 3
	siblingPrevAll
	siblingPrev
	siblingAll
	siblingNext
	siblingNextAll
	siblingNextUntil
	siblingAllIncludingNonElements
)
|
|
||||||
|
|
||||||
// Find gets the descendants of each element in the current set of matched
// elements, filtered by a selector. It returns a new Selection object
// containing these matched elements.
func (s *Selection) Find(selector string) *Selection {
	return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
}

// FindMatcher gets the descendants of each element in the current set of matched
// elements, filtered by the matcher. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindMatcher(m Matcher) *Selection {
	return pushStack(s, findWithMatcher(s.Nodes, m))
}
|
|
||||||
|
|
||||||
// FindSelection gets the descendants of each element in the current
|
|
||||||
// Selection, filtered by a Selection. It returns a new Selection object
|
|
||||||
// containing these matched elements.
|
|
||||||
func (s *Selection) FindSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return pushStack(s, nil)
|
|
||||||
}
|
|
||||||
return s.FindNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindNodes gets the descendants of each element in the current
|
|
||||||
// Selection, filtered by some nodes. It returns a new Selection object
|
|
||||||
// containing these matched elements.
|
|
||||||
func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
if sliceContains(s.Nodes, n) {
|
|
||||||
return []*html.Node{n}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contents gets the children of each element in the Selection,
// including text and comment nodes. It returns a new Selection object
// containing these elements.
func (s *Selection) Contents() *Selection {
	return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
}

// ContentsFiltered gets the children of each element in the Selection,
// filtered by the specified selector. It returns a new Selection
// object containing these elements. Since selectors only act on Element nodes,
// this function is an alias to ChildrenFiltered unless the selector is empty,
// in which case it is an alias to Contents.
func (s *Selection) ContentsFiltered(selector string) *Selection {
	if selector != "" {
		return s.ChildrenFiltered(selector)
	}
	return s.Contents()
}

// ContentsMatcher gets the children of each element in the Selection,
// filtered by the specified matcher. It returns a new Selection
// object containing these elements. Since matchers only act on Element nodes,
// this function is an alias to ChildrenMatcher.
func (s *Selection) ContentsMatcher(m Matcher) *Selection {
	return s.ChildrenMatcher(m)
}
|
|
||||||
|
|
||||||
// Children gets the child elements of each element in the Selection.
// It returns a new Selection object containing these elements.
func (s *Selection) Children() *Selection {
	return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
}

// ChildrenFiltered gets the child elements of each element in the Selection,
// filtered by the specified selector. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenFiltered(selector string) *Selection {
	return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
}

// ChildrenMatcher gets the child elements of each element in the Selection,
// filtered by the specified matcher. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
	return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
}
|
|
||||||
|
|
||||||
// Parent gets the parent of each element in the Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) Parent() *Selection {
	return pushStack(s, getParentNodes(s.Nodes))
}

// ParentFiltered gets the parent of each element in the Selection filtered by a
// selector. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentFiltered(selector string) *Selection {
	return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
}

// ParentMatcher gets the parent of each element in the Selection filtered by a
// matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentMatcher(m Matcher) *Selection {
	return filterAndPush(s, getParentNodes(s.Nodes), m)
}
|
|
||||||
|
|
||||||
// Closest gets the first element that matches the selector by testing the
|
|
||||||
// element itself and traversing up through its ancestors in the DOM tree.
|
|
||||||
func (s *Selection) Closest(selector string) *Selection {
|
|
||||||
cs := compileMatcher(selector)
|
|
||||||
return s.ClosestMatcher(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClosestMatcher gets the first element that matches the matcher by testing the
|
|
||||||
// element itself and traversing up through its ancestors in the DOM tree.
|
|
||||||
func (s *Selection) ClosestMatcher(m Matcher) *Selection {
|
|
||||||
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
// For each node in the selection, test the node itself, then each parent
|
|
||||||
// until a match is found.
|
|
||||||
for ; n != nil; n = n.Parent {
|
|
||||||
if m.Match(n) {
|
|
||||||
return []*html.Node{n}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClosestNodes gets the first element that matches one of the nodes by testing the
|
|
||||||
// element itself and traversing up through its ancestors in the DOM tree.
|
|
||||||
func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
|
|
||||||
set := make(map[*html.Node]bool)
|
|
||||||
for _, n := range nodes {
|
|
||||||
set[n] = true
|
|
||||||
}
|
|
||||||
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
// For each node in the selection, test the node itself, then each parent
|
|
||||||
// until a match is found.
|
|
||||||
for ; n != nil; n = n.Parent {
|
|
||||||
if set[n] {
|
|
||||||
return []*html.Node{n}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClosestSelection gets the first element that matches one of the nodes in the
|
|
||||||
// Selection by testing the element itself and traversing up through its ancestors
|
|
||||||
// in the DOM tree.
|
|
||||||
func (s *Selection) ClosestSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return pushStack(s, nil)
|
|
||||||
}
|
|
||||||
return s.ClosestNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parents gets the ancestors of each element in the current Selection. It
// returns a new Selection object with the matched elements.
func (s *Selection) Parents() *Selection {
	return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
}

// ParentsFiltered gets the ancestors of each element in the current
// Selection filtered by a selector. It returns a new Selection object with
// the matched elements.
func (s *Selection) ParentsFiltered(selector string) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
}

// ParentsMatcher gets the ancestors of each element in the current
// Selection filtered by a matcher. It returns a new Selection object with
// the matched elements.
func (s *Selection) ParentsMatcher(m Matcher) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
}
|
|
||||||
|
|
||||||
// ParentsUntil gets the ancestors of each element in the Selection, up to but
// not including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntil(selector string) *Selection {
	return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
}

// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
// not including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
	return pushStack(s, getParentsNodes(s.Nodes, m, nil))
}

// ParentsUntilSelection gets the ancestors of each element in the Selection,
// up to but not including the elements in the specified Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
	// A nil stop-selection means no boundary: all ancestors are returned.
	if sel == nil {
		return s.Parents()
	}
	return s.ParentsUntilNodes(sel.Nodes...)
}

// ParentsUntilNodes gets the ancestors of each element in the Selection,
// up to but not including the specified nodes. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
	return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
}
|
|
||||||
|
|
||||||
// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
// results based on a selector string. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}

// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
// results based on a matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
}

// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
	return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
}

// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
	// A nil stop-selection means no boundary: filter over all ancestors.
	if sel == nil {
		return s.ParentsMatcher(filter)
	}
	return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
}

// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
}

// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
	return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
}
|
|
||||||
|
|
||||||
// Siblings gets the siblings of each element in the Selection. It returns
// a new Selection object containing the matched elements.
func (s *Selection) Siblings() *Selection {
	return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
}

// SiblingsFiltered gets the siblings of each element in the Selection
// filtered by a selector. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsFiltered(selector string) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
}

// SiblingsMatcher gets the siblings of each element in the Selection
// filtered by a matcher. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
}
|
|
||||||
|
|
||||||
// Next gets the immediately following sibling of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) Next() *Selection {
	return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
}

// NextFiltered gets the immediately following sibling of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextFiltered(selector string) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
}

// NextMatcher gets the immediately following sibling of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextMatcher(m Matcher) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
}

// NextAll gets all the following siblings of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) NextAll() *Selection {
	return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
}

// NextAllFiltered gets all the following siblings of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllFiltered(selector string) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
}

// NextAllMatcher gets all the following siblings of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllMatcher(m Matcher) *Selection {
	return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
}
|
|
||||||
|
|
||||||
// Prev gets the immediately preceding sibling of each element in the
|
|
||||||
// Selection. It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) Prev() *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevFiltered gets the immediately preceding sibling of each element in the
|
|
||||||
// Selection filtered by a selector. It returns a new Selection object
|
|
||||||
// containing the matched elements.
|
|
||||||
func (s *Selection) PrevFiltered(selector string) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevMatcher gets the immediately preceding sibling of each element in the
|
|
||||||
// Selection filtered by a matcher. It returns a new Selection object
|
|
||||||
// containing the matched elements.
|
|
||||||
func (s *Selection) PrevMatcher(m Matcher) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevAll gets all the preceding siblings of each element in the
|
|
||||||
// Selection. It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevAll() *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevAllFiltered gets all the preceding siblings of each element in the
|
|
||||||
// Selection filtered by a selector. It returns a new Selection object
|
|
||||||
// containing the matched elements.
|
|
||||||
func (s *Selection) PrevAllFiltered(selector string) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevAllMatcher gets all the preceding siblings of each element in the
|
|
||||||
// Selection filtered by a matcher. It returns a new Selection object
|
|
||||||
// containing the matched elements.
|
|
||||||
func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextUntil gets all following siblings of each element up to but not
|
|
||||||
// including the element matched by the selector. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) NextUntil(selector string) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
compileMatcher(selector), nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextUntilMatcher gets all following siblings of each element up to but not
|
|
||||||
// including the element matched by the matcher. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
m, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextUntilSelection gets all following siblings of each element up to but not
|
|
||||||
// including the element matched by the Selection. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.NextAll()
|
|
||||||
}
|
|
||||||
return s.NextUntilNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextUntilNodes gets all following siblings of each element up to but not
|
|
||||||
// including the element matched by the nodes. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
nil, nodes))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevUntil gets all preceding siblings of each element up to but not
|
|
||||||
// including the element matched by the selector. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) PrevUntil(selector string) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
compileMatcher(selector), nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevUntilMatcher gets all preceding siblings of each element up to but not
|
|
||||||
// including the element matched by the matcher. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
m, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevUntilSelection gets all preceding siblings of each element up to but not
|
|
||||||
// including the element matched by the Selection. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.PrevAll()
|
|
||||||
}
|
|
||||||
return s.PrevUntilNodes(sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevUntilNodes gets all preceding siblings of each element up to but not
|
|
||||||
// including the element matched by the nodes. It returns a new Selection
|
|
||||||
// object containing the matched elements.
|
|
||||||
func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
|
|
||||||
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
nil, nodes))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextFilteredUntil is like NextUntil, with the option to filter
|
|
||||||
// the results based on a selector string.
|
|
||||||
// It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
|
|
||||||
// the results based on a matcher.
|
|
||||||
// It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
until, nil), filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextFilteredUntilSelection is like NextUntilSelection, with the
|
|
||||||
// option to filter the results based on a selector string. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
|
|
||||||
return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextMatcherUntilSelection is like NextUntilSelection, with the
|
|
||||||
// option to filter the results based on a matcher. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.NextMatcher(filter)
|
|
||||||
}
|
|
||||||
return s.NextMatcherUntilNodes(filter, sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextFilteredUntilNodes is like NextUntilNodes, with the
|
|
||||||
// option to filter the results based on a selector string. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
nil, nodes), compileMatcher(filterSelector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextMatcherUntilNodes is like NextUntilNodes, with the
|
|
||||||
// option to filter the results based on a matcher. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
|
|
||||||
nil, nodes), filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevFilteredUntil is like PrevUntil, with the option to filter
|
|
||||||
// the results based on a selector string.
|
|
||||||
// It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
|
|
||||||
// the results based on a matcher.
|
|
||||||
// It returns a new Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
until, nil), filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevFilteredUntilSelection is like PrevUntilSelection, with the
|
|
||||||
// option to filter the results based on a selector string. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
|
|
||||||
return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevMatcherUntilSelection is like PrevUntilSelection, with the
|
|
||||||
// option to filter the results based on a matcher. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
|
|
||||||
if sel == nil {
|
|
||||||
return s.PrevMatcher(filter)
|
|
||||||
}
|
|
||||||
return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevFilteredUntilNodes is like PrevUntilNodes, with the
|
|
||||||
// option to filter the results based on a selector string. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
nil, nodes), compileMatcher(filterSelector))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrevMatcherUntilNodes is like PrevUntilNodes, with the
|
|
||||||
// option to filter the results based on a matcher. It returns a new
|
|
||||||
// Selection object containing the matched elements.
|
|
||||||
func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
|
|
||||||
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
|
|
||||||
nil, nodes), filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter and push filters the nodes based on a matcher, and pushes the results
|
|
||||||
// on the stack, with the srcSel as previous selection.
|
|
||||||
func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
|
|
||||||
// Create a temporary Selection with the specified nodes to filter using winnow
|
|
||||||
sel := &Selection{nodes, srcSel.document, nil}
|
|
||||||
// Filter based on matcher and push on stack
|
|
||||||
return pushStack(srcSel, winnow(sel, m, true))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal implementation of Find that return raw nodes.
|
|
||||||
func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
|
|
||||||
// Map nodes to find the matches within the children of each node
|
|
||||||
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
|
|
||||||
// Go down one level, becausejQuery's Find selects only within descendants
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
if c.Type == html.ElementNode {
|
|
||||||
result = append(result, m.MatchAll(c)...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal implementation to get all parent nodes, stopping at the specified
|
|
||||||
// node (or nil if no stop).
|
|
||||||
func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
|
|
||||||
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
|
|
||||||
for p := n.Parent; p != nil; p = p.Parent {
|
|
||||||
sel := newSingleSelection(p, nil)
|
|
||||||
if stopm != nil {
|
|
||||||
if sel.IsMatcher(stopm) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if len(stopNodes) > 0 {
|
|
||||||
if sel.IsNodes(stopNodes...) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.Type == html.ElementNode {
|
|
||||||
result = append(result, p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal implementation of sibling nodes that return a raw slice of matches.
|
|
||||||
func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
|
|
||||||
var f func(*html.Node) bool
|
|
||||||
|
|
||||||
// If the requested siblings are ...Until, create the test function to
|
|
||||||
// determine if the until condition is reached (returns true if it is)
|
|
||||||
if st == siblingNextUntil || st == siblingPrevUntil {
|
|
||||||
f = func(n *html.Node) bool {
|
|
||||||
if untilm != nil {
|
|
||||||
// Matcher-based condition
|
|
||||||
sel := newSingleSelection(n, nil)
|
|
||||||
return sel.IsMatcher(untilm)
|
|
||||||
} else if len(untilNodes) > 0 {
|
|
||||||
// Nodes-based condition
|
|
||||||
sel := newSingleSelection(n, nil)
|
|
||||||
return sel.IsNodes(untilNodes...)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
return getChildrenWithSiblingType(n.Parent, st, n, f)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the children nodes of each node in the specified slice of nodes,
|
|
||||||
// based on the sibling type request.
|
|
||||||
func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
|
|
||||||
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
return getChildrenWithSiblingType(n, st, nil, nil)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the children of the specified parent, based on the requested sibling
|
|
||||||
// type, skipping a specified node if required.
|
|
||||||
func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
|
|
||||||
untilFunc func(*html.Node) bool) (result []*html.Node) {
|
|
||||||
|
|
||||||
// Create the iterator function
|
|
||||||
var iter = func(cur *html.Node) (ret *html.Node) {
|
|
||||||
// Based on the sibling type requested, iterate the right way
|
|
||||||
for {
|
|
||||||
switch st {
|
|
||||||
case siblingAll, siblingAllIncludingNonElements:
|
|
||||||
if cur == nil {
|
|
||||||
// First iteration, start with first child of parent
|
|
||||||
// Skip node if required
|
|
||||||
if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
|
|
||||||
ret = skipNode.NextSibling
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Skip node if required
|
|
||||||
if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
|
|
||||||
ret = skipNode.NextSibling
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case siblingPrev, siblingPrevAll, siblingPrevUntil:
|
|
||||||
if cur == nil {
|
|
||||||
// Start with previous sibling of the skip node
|
|
||||||
ret = skipNode.PrevSibling
|
|
||||||
} else {
|
|
||||||
ret = cur.PrevSibling
|
|
||||||
}
|
|
||||||
case siblingNext, siblingNextAll, siblingNextUntil:
|
|
||||||
if cur == nil {
|
|
||||||
// Start with next sibling of the skip node
|
|
||||||
ret = skipNode.NextSibling
|
|
||||||
} else {
|
|
||||||
ret = cur.NextSibling
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("Invalid sibling type.")
|
|
||||||
}
|
|
||||||
if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Not a valid node, try again from this one
|
|
||||||
cur = ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for c := iter(nil); c != nil; c = iter(c) {
|
|
||||||
// If this is an ...Until case, test before append (returns true
|
|
||||||
// if the until condition is reached)
|
|
||||||
if st == siblingNextUntil || st == siblingPrevUntil {
|
|
||||||
if untilFunc(c) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
result = append(result, c)
|
|
||||||
if st == siblingNext || st == siblingPrev {
|
|
||||||
// Only one node was requested (immediate next or previous), so exit
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal implementation of parent nodes that return a raw slice of Nodes.
|
|
||||||
func getParentNodes(nodes []*html.Node) []*html.Node {
|
|
||||||
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
|
|
||||||
if n.Parent != nil && n.Parent.Type == html.ElementNode {
|
|
||||||
return []*html.Node{n.Parent}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal map function used by many traversing methods. Takes the source nodes
|
|
||||||
// to iterate on and the mapping function that returns an array of nodes.
|
|
||||||
// Returns an array of nodes mapped by calling the callback function once for
|
|
||||||
// each node in the source nodes.
|
|
||||||
func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
|
|
||||||
set := make(map[*html.Node]bool)
|
|
||||||
for i, n := range nodes {
|
|
||||||
if vals := f(i, n); len(vals) > 0 {
|
|
||||||
result = appendWithoutDuplicates(result, vals, set)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
141
vendor/github.com/PuerkitoBio/goquery/type.go
generated
vendored
141
vendor/github.com/PuerkitoBio/goquery/type.go
generated
vendored
@ -1,141 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/andybalholm/cascadia"
|
|
||||||
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Document represents an HTML document to be manipulated. Unlike jQuery, which
|
|
||||||
// is loaded as part of a DOM document, and thus acts upon its containing
|
|
||||||
// document, GoQuery doesn't know which HTML document to act upon. So it needs
|
|
||||||
// to be told, and that's what the Document class is for. It holds the root
|
|
||||||
// document node to manipulate, and can make selections on this document.
|
|
||||||
type Document struct {
|
|
||||||
*Selection
|
|
||||||
Url *url.URL
|
|
||||||
rootNode *html.Node
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDocumentFromNode is a Document constructor that takes a root html Node
|
|
||||||
// as argument.
|
|
||||||
func NewDocumentFromNode(root *html.Node) *Document {
|
|
||||||
return newDocument(root, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDocument is a Document constructor that takes a string URL as argument.
|
|
||||||
// It loads the specified document, parses it, and stores the root Document
|
|
||||||
// node, ready to be manipulated.
|
|
||||||
//
|
|
||||||
// Deprecated: Use the net/http standard library package to make the request
|
|
||||||
// and validate the response before calling goquery.NewDocumentFromReader
|
|
||||||
// with the response's body.
|
|
||||||
func NewDocument(url string) (*Document, error) {
|
|
||||||
// Load the URL
|
|
||||||
res, e := http.Get(url)
|
|
||||||
if e != nil {
|
|
||||||
return nil, e
|
|
||||||
}
|
|
||||||
return NewDocumentFromResponse(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDocumentFromReader returns a Document from an io.Reader.
|
|
||||||
// It returns an error as second value if the reader's data cannot be parsed
|
|
||||||
// as html. It does not check if the reader is also an io.Closer, the
|
|
||||||
// provided reader is never closed by this call. It is the responsibility
|
|
||||||
// of the caller to close it if required.
|
|
||||||
func NewDocumentFromReader(r io.Reader) (*Document, error) {
|
|
||||||
root, e := html.Parse(r)
|
|
||||||
if e != nil {
|
|
||||||
return nil, e
|
|
||||||
}
|
|
||||||
return newDocument(root, nil), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
|
|
||||||
// It loads the specified response's document, parses it, and stores the root Document
|
|
||||||
// node, ready to be manipulated. The response's body is closed on return.
|
|
||||||
//
|
|
||||||
// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
|
|
||||||
func NewDocumentFromResponse(res *http.Response) (*Document, error) {
|
|
||||||
if res == nil {
|
|
||||||
return nil, errors.New("Response is nil")
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.Request == nil {
|
|
||||||
return nil, errors.New("Response.Request is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the HTML into nodes
|
|
||||||
root, e := html.Parse(res.Body)
|
|
||||||
if e != nil {
|
|
||||||
return nil, e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create and fill the document
|
|
||||||
return newDocument(root, res.Request.URL), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloneDocument creates a deep-clone of a document.
|
|
||||||
func CloneDocument(doc *Document) *Document {
|
|
||||||
return newDocument(cloneNode(doc.rootNode), doc.Url)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Private constructor, make sure all fields are correctly filled.
|
|
||||||
func newDocument(root *html.Node, url *url.URL) *Document {
|
|
||||||
// Create and fill the document
|
|
||||||
d := &Document{nil, url, root}
|
|
||||||
d.Selection = newSingleSelection(root, d)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Selection represents a collection of nodes matching some criteria. The
|
|
||||||
// initial Selection can be created by using Document.Find, and then
|
|
||||||
// manipulated using the jQuery-like chainable syntax and methods.
|
|
||||||
type Selection struct {
|
|
||||||
Nodes []*html.Node
|
|
||||||
document *Document
|
|
||||||
prevSel *Selection
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper constructor to create an empty selection
|
|
||||||
func newEmptySelection(doc *Document) *Selection {
|
|
||||||
return &Selection{nil, doc, nil}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper constructor to create a selection of only one node
|
|
||||||
func newSingleSelection(node *html.Node, doc *Document) *Selection {
|
|
||||||
return &Selection{[]*html.Node{node}, doc, nil}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matcher is an interface that defines the methods to match
|
|
||||||
// HTML nodes against a compiled selector string. Cascadia's
|
|
||||||
// Selector implements this interface.
|
|
||||||
type Matcher interface {
|
|
||||||
Match(*html.Node) bool
|
|
||||||
MatchAll(*html.Node) []*html.Node
|
|
||||||
Filter([]*html.Node) []*html.Node
|
|
||||||
}
|
|
||||||
|
|
||||||
// compileMatcher compiles the selector string s and returns
|
|
||||||
// the corresponding Matcher. If s is an invalid selector string,
|
|
||||||
// it returns a Matcher that fails all matches.
|
|
||||||
func compileMatcher(s string) Matcher {
|
|
||||||
cs, err := cascadia.Compile(s)
|
|
||||||
if err != nil {
|
|
||||||
return invalidMatcher{}
|
|
||||||
}
|
|
||||||
return cs
|
|
||||||
}
|
|
||||||
|
|
||||||
// invalidMatcher is a Matcher that always fails to match.
|
|
||||||
type invalidMatcher struct{}
|
|
||||||
|
|
||||||
func (invalidMatcher) Match(n *html.Node) bool { return false }
|
|
||||||
func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
|
|
||||||
func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }
|
|
161
vendor/github.com/PuerkitoBio/goquery/utilities.go
generated
vendored
161
vendor/github.com/PuerkitoBio/goquery/utilities.go
generated
vendored
@ -1,161 +0,0 @@
|
|||||||
package goquery
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// used to determine if a set (map[*html.Node]bool) should be used
|
|
||||||
// instead of iterating over a slice. The set uses more memory and
|
|
||||||
// is slower than slice iteration for small N.
|
|
||||||
const minNodesForSet = 1000
|
|
||||||
|
|
||||||
var nodeNames = []string{
|
|
||||||
html.ErrorNode: "#error",
|
|
||||||
html.TextNode: "#text",
|
|
||||||
html.DocumentNode: "#document",
|
|
||||||
html.CommentNode: "#comment",
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeName returns the node name of the first element in the selection.
|
|
||||||
// It tries to behave in a similar way as the DOM's nodeName property
|
|
||||||
// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
|
|
||||||
//
|
|
||||||
// Go's net/html package defines the following node types, listed with
|
|
||||||
// the corresponding returned value from this function:
|
|
||||||
//
|
|
||||||
// ErrorNode : #error
|
|
||||||
// TextNode : #text
|
|
||||||
// DocumentNode : #document
|
|
||||||
// ElementNode : the element's tag name
|
|
||||||
// CommentNode : #comment
|
|
||||||
// DoctypeNode : the name of the document type
|
|
||||||
//
|
|
||||||
func NodeName(s *Selection) string {
|
|
||||||
if s.Length() == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
switch n := s.Get(0); n.Type {
|
|
||||||
case html.ElementNode, html.DoctypeNode:
|
|
||||||
return n.Data
|
|
||||||
default:
|
|
||||||
if n.Type >= 0 && int(n.Type) < len(nodeNames) {
|
|
||||||
return nodeNames[n.Type]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OuterHtml returns the outer HTML rendering of the first item in
|
|
||||||
// the selection - that is, the HTML including the first element's
|
|
||||||
// tag and attributes.
|
|
||||||
//
|
|
||||||
// Unlike InnerHtml, this is a function and not a method on the Selection,
|
|
||||||
// because this is not a jQuery method (in javascript-land, this is
|
|
||||||
// a property provided by the DOM).
|
|
||||||
func OuterHtml(s *Selection) (string, error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
if s.Length() == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
n := s.Get(0)
|
|
||||||
if err := html.Render(&buf, n); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return buf.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through all container nodes to search for the target node.
|
|
||||||
func sliceContains(container []*html.Node, contained *html.Node) bool {
|
|
||||||
for _, n := range container {
|
|
||||||
if nodeContains(n, contained) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checks if the contained node is within the container node.
|
|
||||||
func nodeContains(container *html.Node, contained *html.Node) bool {
|
|
||||||
// Check if the parent of the contained node is the container node, traversing
|
|
||||||
// upward until the top is reached, or the container is found.
|
|
||||||
for contained = contained.Parent; contained != nil; contained = contained.Parent {
|
|
||||||
if container == contained {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checks if the target node is in the slice of nodes.
|
|
||||||
func isInSlice(slice []*html.Node, node *html.Node) bool {
|
|
||||||
return indexInSlice(slice, node) > -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the index of the target node in the slice, or -1.
|
|
||||||
func indexInSlice(slice []*html.Node, node *html.Node) int {
|
|
||||||
if node != nil {
|
|
||||||
for i, n := range slice {
|
|
||||||
if n == node {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Appends the new nodes to the target slice, making sure no duplicate is added.
|
|
||||||
// There is no check to the original state of the target slice, so it may still
|
|
||||||
// contain duplicates. The target slice is returned because append() may create
|
|
||||||
// a new underlying array. If targetSet is nil, a local set is created with the
|
|
||||||
// target if len(target) + len(nodes) is greater than minNodesForSet.
|
|
||||||
func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
|
|
||||||
// if there are not that many nodes, don't use the map, faster to just use nested loops
|
|
||||||
// (unless a non-nil targetSet is passed, in which case the caller knows better).
|
|
||||||
if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
|
|
||||||
for _, n := range nodes {
|
|
||||||
if !isInSlice(target, n) {
|
|
||||||
target = append(target, n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return target
|
|
||||||
}
|
|
||||||
|
|
||||||
// if a targetSet is passed, then assume it is reliable, otherwise create one
|
|
||||||
// and initialize it with the current target contents.
|
|
||||||
if targetSet == nil {
|
|
||||||
targetSet = make(map[*html.Node]bool, len(target))
|
|
||||||
for _, n := range target {
|
|
||||||
targetSet[n] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, n := range nodes {
|
|
||||||
if !targetSet[n] {
|
|
||||||
target = append(target, n)
|
|
||||||
targetSet[n] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return target
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through a selection, returning only those nodes that pass the predicate
|
|
||||||
// function.
|
|
||||||
func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
|
|
||||||
for i, n := range sel.Nodes {
|
|
||||||
if predicate(i, newSingleSelection(n, sel.document)) {
|
|
||||||
result = append(result, n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates a new Selection object based on the specified nodes, and keeps the
|
|
||||||
// source Selection object on the stack (linked list).
|
|
||||||
func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
|
|
||||||
result := &Selection{nodes, fromSel.document, fromSel}
|
|
||||||
return result
|
|
||||||
}
|
|
11
vendor/github.com/RoaringBitmap/roaring/AUTHORS
generated
vendored
11
vendor/github.com/RoaringBitmap/roaring/AUTHORS
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
# This is the official list of roaring authors for copyright purposes.
|
|
||||||
|
|
||||||
Todd Gruben (@tgruben),
|
|
||||||
Daniel Lemire (@lemire),
|
|
||||||
Elliot Murphy (@statik),
|
|
||||||
Bob Potter (@bpot),
|
|
||||||
Tyson Maly (@tvmaly),
|
|
||||||
Will Glynn (@willglynn),
|
|
||||||
Brent Pedersen (@brentp),
|
|
||||||
Maciej Biłas (@maciej),
|
|
||||||
Joe Nall (@joenall)
|
|
15
vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
generated
vendored
15
vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
# This is the official list of roaring contributors
|
|
||||||
|
|
||||||
Todd Gruben (@tgruben),
|
|
||||||
Daniel Lemire (@lemire),
|
|
||||||
Elliot Murphy (@statik),
|
|
||||||
Bob Potter (@bpot),
|
|
||||||
Tyson Maly (@tvmaly),
|
|
||||||
Will Glynn (@willglynn),
|
|
||||||
Brent Pedersen (@brentp),
|
|
||||||
Jason E. Aten (@glycerine),
|
|
||||||
Vali Malinoiu (@0x4139),
|
|
||||||
Forud Ghafouri (@fzerorubigd),
|
|
||||||
Joe Nall (@joenall),
|
|
||||||
(@fredim),
|
|
||||||
Edd Robinson (@e-dard)
|
|
202
vendor/github.com/RoaringBitmap/roaring/LICENSE
generated
vendored
202
vendor/github.com/RoaringBitmap/roaring/LICENSE
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright 2016 by the authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
202
vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt
generated
vendored
202
vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright 2016 by the authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
111
vendor/github.com/RoaringBitmap/roaring/Makefile
generated
vendored
111
vendor/github.com/RoaringBitmap/roaring/Makefile
generated
vendored
@ -1,111 +0,0 @@
|
|||||||
.PHONY: help all test format fmtcheck vet lint qa deps clean nuke ser fetch-real-roaring-datasets
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Display general help about this command
|
|
||||||
help:
|
|
||||||
@echo ""
|
|
||||||
@echo "The following commands are available:"
|
|
||||||
@echo ""
|
|
||||||
@echo " make qa : Run all the tests"
|
|
||||||
@echo " make test : Run the unit tests"
|
|
||||||
@echo ""
|
|
||||||
@echo " make format : Format the source code"
|
|
||||||
@echo " make fmtcheck : Check if the source code has been formatted"
|
|
||||||
@echo " make vet : Check for suspicious constructs"
|
|
||||||
@echo " make lint : Check for style errors"
|
|
||||||
@echo ""
|
|
||||||
@echo " make deps : Get the dependencies"
|
|
||||||
@echo " make clean : Remove any build artifact"
|
|
||||||
@echo " make nuke : Deletes any intermediate file"
|
|
||||||
@echo ""
|
|
||||||
@echo " make fuzz-smat : Fuzzy testing with smat"
|
|
||||||
@echo " make fuzz-stream : Fuzzy testing with stream deserialization"
|
|
||||||
@echo " make fuzz-buffer : Fuzzy testing with buffer deserialization"
|
|
||||||
@echo ""
|
|
||||||
|
|
||||||
# Alias for help target
|
|
||||||
all: help
|
|
||||||
test:
|
|
||||||
go test
|
|
||||||
go test -race -run TestConcurrent*
|
|
||||||
# Format the source code
|
|
||||||
format:
|
|
||||||
@find ./ -type f -name "*.go" -exec gofmt -w {} \;
|
|
||||||
|
|
||||||
# Check if the source code has been formatted
|
|
||||||
fmtcheck:
|
|
||||||
@mkdir -p target
|
|
||||||
@find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff
|
|
||||||
@test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
|
|
||||||
|
|
||||||
# Check for syntax errors
|
|
||||||
vet:
|
|
||||||
GOPATH=$(GOPATH) go vet ./...
|
|
||||||
|
|
||||||
# Check for style errors
|
|
||||||
lint:
|
|
||||||
GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./...
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Alias to run all quality-assurance checks
|
|
||||||
qa: fmtcheck test vet lint
|
|
||||||
|
|
||||||
# --- INSTALL ---
|
|
||||||
|
|
||||||
# Get the dependencies
|
|
||||||
deps:
|
|
||||||
GOPATH=$(GOPATH) go get github.com/smartystreets/goconvey/convey
|
|
||||||
GOPATH=$(GOPATH) go get github.com/willf/bitset
|
|
||||||
GOPATH=$(GOPATH) go get github.com/golang/lint/golint
|
|
||||||
GOPATH=$(GOPATH) go get github.com/mschoch/smat
|
|
||||||
GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz
|
|
||||||
GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
|
||||||
GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream
|
|
||||||
GOPATH=$(GOPATH) go get github.com/philhofer/fwd
|
|
||||||
GOPATH=$(GOPATH) go get github.com/jtolds/gls
|
|
||||||
|
|
||||||
fuzz-smat:
|
|
||||||
go test -tags=gofuzz -run=TestGenerateSmatCorpus
|
|
||||||
go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
|
|
||||||
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
|
|
||||||
|
|
||||||
|
|
||||||
fuzz-stream:
|
|
||||||
go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring
|
|
||||||
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
|
|
||||||
|
|
||||||
|
|
||||||
fuzz-buffer:
|
|
||||||
go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring
|
|
||||||
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
|
|
||||||
|
|
||||||
# Remove any build artifact
|
|
||||||
clean:
|
|
||||||
GOPATH=$(GOPATH) go clean ./...
|
|
||||||
|
|
||||||
# Deletes any intermediate file
|
|
||||||
nuke:
|
|
||||||
rm -rf ./target
|
|
||||||
GOPATH=$(GOPATH) go clean -i ./...
|
|
||||||
|
|
||||||
|
|
||||||
ser:
|
|
||||||
go generate
|
|
||||||
|
|
||||||
cover:
|
|
||||||
go test -coverprofile=coverage.out
|
|
||||||
go tool cover -html=coverage.out
|
|
||||||
|
|
||||||
fetch-real-roaring-datasets:
|
|
||||||
# pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets
|
|
||||||
git submodule init
|
|
||||||
git submodule update
|
|
247
vendor/github.com/RoaringBitmap/roaring/README.md
generated
vendored
247
vendor/github.com/RoaringBitmap/roaring/README.md
generated
vendored
@ -1,247 +0,0 @@
|
|||||||
roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![Coverage Status](https://coveralls.io/repos/github/RoaringBitmap/roaring/badge.svg?branch=master)](https://coveralls.io/github/RoaringBitmap/roaring?branch=master) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring)
|
|
||||||
=============
|
|
||||||
|
|
||||||
This is a go version of the Roaring bitmap data structure.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Roaring bitmaps are used by several major systems such as [Apache Lucene][lucene] and derivative systems such as [Solr][solr] and
|
|
||||||
[Elasticsearch][elasticsearch], [Metamarkets' Druid][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin].
|
|
||||||
|
|
||||||
[lucene]: https://lucene.apache.org/
|
|
||||||
[solr]: https://lucene.apache.org/solr/
|
|
||||||
[elasticsearch]: https://www.elastic.co/products/elasticsearch
|
|
||||||
[druid]: http://druid.io/
|
|
||||||
[spark]: https://spark.apache.org/
|
|
||||||
[opensearchserver]: http://www.opensearchserver.com
|
|
||||||
[cloudtorrent]: https://github.com/jpillora/cloud-torrent
|
|
||||||
[whoosh]: https://bitbucket.org/mchaput/whoosh/wiki/Home
|
|
||||||
[pilosa]: https://www.pilosa.com/
|
|
||||||
[kylin]: http://kylin.apache.org/
|
|
||||||
[pinot]: http://github.com/linkedin/pinot/wiki
|
|
||||||
[vsts]: https://www.visualstudio.com/team-services/
|
|
||||||
[atlas]: https://github.com/Netflix/atlas
|
|
||||||
|
|
||||||
Roaring bitmaps are found to work well in many important applications:
|
|
||||||
|
|
||||||
> Use Roaring for bitmap compression whenever possible. Do not use other bitmap compression methods ([Wang et al., SIGMOD 2017](http://db.ucsd.edu/wp-content/uploads/2017/03/sidm338-wangA.pdf))
|
|
||||||
|
|
||||||
|
|
||||||
The ``roaring`` Go library is used by
|
|
||||||
* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client
|
|
||||||
* [runv](https://github.com/hyperhq/runv): an Hypervisor-based runtime for the Open Containers Initiative
|
|
||||||
* [InfluxDB](https://www.influxdata.com)
|
|
||||||
* [Pilosa](https://www.pilosa.com/)
|
|
||||||
* [Bleve](http://www.blevesearch.com)
|
|
||||||
|
|
||||||
This library is used in production in several systems, it is part of the [Awesome Go collection](https://awesome-go.com).
|
|
||||||
|
|
||||||
|
|
||||||
There are also [Java](https://github.com/RoaringBitmap/RoaringBitmap) and [C/C++](https://github.com/RoaringBitmap/CRoaring) versions. The Java, C, C++ and Go version are binary compatible: e.g, you can save bitmaps
|
|
||||||
from a Java program and load them back in Go, and vice versa. We have a [format specification](https://github.com/RoaringBitmap/RoaringFormatSpec).
|
|
||||||
|
|
||||||
|
|
||||||
This code is licensed under Apache License, Version 2.0 (ASL2.0).
|
|
||||||
|
|
||||||
Copyright 2016-... by the authors.
|
|
||||||
|
|
||||||
|
|
||||||
### References
|
|
||||||
|
|
||||||
- Daniel Lemire, Owen Kaser, Nathan Kurz, Luca Deri, Chris O'Hara, François Saint-Jacques, Gregory Ssi-Yan-Kai, Roaring Bitmaps: Implementation of an Optimized Software Library, Software: Practice and Experience 48 (4), 2018 [arXiv:1709.07821](https://arxiv.org/abs/1709.07821)
|
|
||||||
- Samy Chambi, Daniel Lemire, Owen Kaser, Robert Godin,
|
|
||||||
Better bitmap performance with Roaring bitmaps,
|
|
||||||
Software: Practice and Experience 46 (5), 2016.
|
|
||||||
http://arxiv.org/abs/1402.6407 This paper used data from http://lemire.me/data/realroaring2014.html
|
|
||||||
- Daniel Lemire, Gregory Ssi-Yan-Kai, Owen Kaser, Consistently faster and smaller compressed bitmaps with Roaring, Software: Practice and Experience 46 (11), 2016. http://arxiv.org/abs/1603.06549
|
|
||||||
|
|
||||||
|
|
||||||
### Dependencies
|
|
||||||
|
|
||||||
Dependencies are fetched automatically by giving the `-t` flag to `go get`.
|
|
||||||
|
|
||||||
they include
|
|
||||||
- github.com/smartystreets/goconvey/convey
|
|
||||||
- github.com/willf/bitset
|
|
||||||
- github.com/mschoch/smat
|
|
||||||
- github.com/glycerine/go-unsnap-stream
|
|
||||||
- github.com/philhofer/fwd
|
|
||||||
- github.com/jtolds/gls
|
|
||||||
|
|
||||||
Note that the smat library requires Go 1.6 or better.
|
|
||||||
|
|
||||||
#### Installation
|
|
||||||
|
|
||||||
- go get -t github.com/RoaringBitmap/roaring
|
|
||||||
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
Here is a simplified but complete example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/RoaringBitmap/roaring"
|
|
||||||
"bytes"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// example inspired by https://github.com/fzandona/goroar
|
|
||||||
fmt.Println("==roaring==")
|
|
||||||
rb1 := roaring.BitmapOf(1, 2, 3, 4, 5, 100, 1000)
|
|
||||||
fmt.Println(rb1.String())
|
|
||||||
|
|
||||||
rb2 := roaring.BitmapOf(3, 4, 1000)
|
|
||||||
fmt.Println(rb2.String())
|
|
||||||
|
|
||||||
rb3 := roaring.New()
|
|
||||||
fmt.Println(rb3.String())
|
|
||||||
|
|
||||||
fmt.Println("Cardinality: ", rb1.GetCardinality())
|
|
||||||
|
|
||||||
fmt.Println("Contains 3? ", rb1.Contains(3))
|
|
||||||
|
|
||||||
rb1.And(rb2)
|
|
||||||
|
|
||||||
rb3.Add(1)
|
|
||||||
rb3.Add(5)
|
|
||||||
|
|
||||||
rb3.Or(rb1)
|
|
||||||
|
|
||||||
// computes union of the three bitmaps in parallel using 4 workers
|
|
||||||
roaring.ParOr(4, rb1, rb2, rb3)
|
|
||||||
// computes intersection of the three bitmaps in parallel using 4 workers
|
|
||||||
roaring.ParAnd(4, rb1, rb2, rb3)
|
|
||||||
|
|
||||||
|
|
||||||
// prints 1, 3, 4, 5, 1000
|
|
||||||
i := rb3.Iterator()
|
|
||||||
for i.HasNext() {
|
|
||||||
fmt.Println(i.Next())
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
|
|
||||||
// next we include an example of serialization
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
rb1.WriteTo(buf) // we omit error handling
|
|
||||||
newrb:= roaring.New()
|
|
||||||
newrb.ReadFrom(buf)
|
|
||||||
if rb1.Equals(newrb) {
|
|
||||||
fmt.Println("I wrote the content to a byte stream and read it back.")
|
|
||||||
}
|
|
||||||
// you can iterate over bitmaps using ReverseIterator(), Iterator, ManyIterator()
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If you wish to use serialization and handle errors, you might want to
|
|
||||||
consider the following sample of code:
|
|
||||||
|
|
||||||
```go
|
|
||||||
rb := BitmapOf(1, 2, 3, 4, 5, 100, 1000)
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
size,err:=rb.WriteTo(buf)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed writing")
|
|
||||||
}
|
|
||||||
newrb:= New()
|
|
||||||
size,err=newrb.ReadFrom(buf)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed reading")
|
|
||||||
}
|
|
||||||
if ! rb.Equals(newrb) {
|
|
||||||
t.Errorf("Cannot retrieve serialized version")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Given N integers in [0,x), then the serialized size in bytes of
|
|
||||||
a Roaring bitmap should never exceed this bound:
|
|
||||||
|
|
||||||
`` 8 + 9 * ((long)x+65535)/65536 + 2 * N ``
|
|
||||||
|
|
||||||
That is, given a fixed overhead for the universe size (x), Roaring
|
|
||||||
bitmaps never use more than 2 bytes per integer. You can call
|
|
||||||
``BoundSerializedSizeInBytes`` for a more precise estimate.
|
|
||||||
|
|
||||||
|
|
||||||
### Documentation
|
|
||||||
|
|
||||||
Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring
|
|
||||||
|
|
||||||
### Goroutine safety
|
|
||||||
|
|
||||||
In general, it should not generally be considered safe to access
|
|
||||||
the same bitmaps using different goroutines--they are left
|
|
||||||
unsynchronized for performance. Should you want to access
|
|
||||||
a Bitmap from more than one goroutine, you should
|
|
||||||
provide synchronization. Typically this is done by using channels to pass
|
|
||||||
the *Bitmap around (in Go style; so there is only ever one owner),
|
|
||||||
or by using `sync.Mutex` to serialize operations on Bitmaps.
|
|
||||||
|
|
||||||
### Coverage
|
|
||||||
|
|
||||||
We test our software. For a report on our test coverage, see
|
|
||||||
|
|
||||||
https://coveralls.io/github/RoaringBitmap/roaring?branch=master
|
|
||||||
|
|
||||||
### Benchmark
|
|
||||||
|
|
||||||
Type
|
|
||||||
|
|
||||||
go test -bench Benchmark -run -
|
|
||||||
|
|
||||||
To run benchmarks on [Real Roaring Datasets](https://github.com/RoaringBitmap/real-roaring-datasets)
|
|
||||||
run the following:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
go get github.com/RoaringBitmap/real-roaring-datasets
|
|
||||||
BENCH_REAL_DATA=1 go test -bench BenchmarkRealData -run -
|
|
||||||
```
|
|
||||||
|
|
||||||
### Iterative use
|
|
||||||
|
|
||||||
You can use roaring with gore:
|
|
||||||
|
|
||||||
- go get -u github.com/motemen/gore
|
|
||||||
- Make sure that ``$GOPATH/bin`` is in your ``$PATH``.
|
|
||||||
- go get github/RoaringBitmap/roaring
|
|
||||||
|
|
||||||
```go
|
|
||||||
$ gore
|
|
||||||
gore version 0.2.6 :help for help
|
|
||||||
gore> :import github.com/RoaringBitmap/roaring
|
|
||||||
gore> x:=roaring.New()
|
|
||||||
gore> x.Add(1)
|
|
||||||
gore> x.String()
|
|
||||||
"{1}"
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Fuzzy testing
|
|
||||||
|
|
||||||
You can help us test further the library with fuzzy testing:
|
|
||||||
|
|
||||||
go get github.com/dvyukov/go-fuzz/go-fuzz
|
|
||||||
go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
|
||||||
go test -tags=gofuzz -run=TestGenerateSmatCorpus
|
|
||||||
go-fuzz-build github.com/RoaringBitmap/roaring
|
|
||||||
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
|
|
||||||
|
|
||||||
Let it run, and if the # of crashers is > 0, check out the reports in
|
|
||||||
the workdir where you should be able to find the panic goroutine stack
|
|
||||||
traces.
|
|
||||||
|
|
||||||
### Alternative in Go
|
|
||||||
|
|
||||||
There is a Go version wrapping the C/C++ implementation https://github.com/RoaringBitmap/gocroaring
|
|
||||||
|
|
||||||
For an alternative implementation in Go, see https://github.com/fzandona/goroar
|
|
||||||
The two versions were written independently.
|
|
||||||
|
|
||||||
|
|
||||||
### Mailing list/discussion group
|
|
||||||
|
|
||||||
https://groups.google.com/forum/#!forum/roaring-bitmaps
|
|
968
vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
generated
vendored
968
vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
generated
vendored
@ -1,968 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate msgp -unexported
|
|
||||||
|
|
||||||
type arrayContainer struct {
|
|
||||||
content []uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) String() string {
|
|
||||||
s := "{"
|
|
||||||
for it := ac.getShortIterator(); it.hasNext(); {
|
|
||||||
s += fmt.Sprintf("%v, ", it.next())
|
|
||||||
}
|
|
||||||
return s + "}"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
|
|
||||||
for k := 0; k < len(ac.content); k++ {
|
|
||||||
x[k+i] = uint32(ac.content[k]) | mask
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) getShortIterator() shortIterable {
|
|
||||||
return &shortIterator{ac.content, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) getReverseIterator() shortIterable {
|
|
||||||
return &reverseIterator{ac.content, len(ac.content) - 1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) getManyIterator() manyIterable {
|
|
||||||
return &manyIterator{ac.content, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) minimum() uint16 {
|
|
||||||
return ac.content[0] // assume not empty
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) maximum() uint16 {
|
|
||||||
return ac.content[len(ac.content)-1] // assume not empty
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) getSizeInBytes() int {
|
|
||||||
return ac.getCardinality() * 2
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) serializedSizeInBytes() int {
|
|
||||||
return ac.getCardinality() * 2
|
|
||||||
}
|
|
||||||
|
|
||||||
func arrayContainerSizeInBytes(card int) int {
|
|
||||||
return card * 2
|
|
||||||
}
|
|
||||||
|
|
||||||
// add the values in the range [firstOfRange,endx)
|
|
||||||
func (ac *arrayContainer) iaddRange(firstOfRange, endx int) container {
|
|
||||||
if firstOfRange >= endx {
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
indexstart := binarySearch(ac.content, uint16(firstOfRange))
|
|
||||||
if indexstart < 0 {
|
|
||||||
indexstart = -indexstart - 1
|
|
||||||
}
|
|
||||||
indexend := binarySearch(ac.content, uint16(endx-1))
|
|
||||||
if indexend < 0 {
|
|
||||||
indexend = -indexend - 1
|
|
||||||
} else {
|
|
||||||
indexend++
|
|
||||||
}
|
|
||||||
rangelength := endx - firstOfRange
|
|
||||||
newcardinality := indexstart + (ac.getCardinality() - indexend) + rangelength
|
|
||||||
if newcardinality > arrayDefaultMaxSize {
|
|
||||||
a := ac.toBitmapContainer()
|
|
||||||
return a.iaddRange(firstOfRange, endx)
|
|
||||||
}
|
|
||||||
if cap(ac.content) < newcardinality {
|
|
||||||
tmp := make([]uint16, newcardinality, newcardinality)
|
|
||||||
copy(tmp[:indexstart], ac.content[:indexstart])
|
|
||||||
copy(tmp[indexstart+rangelength:], ac.content[indexend:])
|
|
||||||
|
|
||||||
ac.content = tmp
|
|
||||||
} else {
|
|
||||||
ac.content = ac.content[:newcardinality]
|
|
||||||
copy(ac.content[indexstart+rangelength:], ac.content[indexend:])
|
|
||||||
|
|
||||||
}
|
|
||||||
for k := 0; k < rangelength; k++ {
|
|
||||||
ac.content[k+indexstart] = uint16(firstOfRange + k)
|
|
||||||
}
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove the values in the range [firstOfRange,endx)
|
|
||||||
func (ac *arrayContainer) iremoveRange(firstOfRange, endx int) container {
|
|
||||||
if firstOfRange >= endx {
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
indexstart := binarySearch(ac.content, uint16(firstOfRange))
|
|
||||||
if indexstart < 0 {
|
|
||||||
indexstart = -indexstart - 1
|
|
||||||
}
|
|
||||||
indexend := binarySearch(ac.content, uint16(endx-1))
|
|
||||||
if indexend < 0 {
|
|
||||||
indexend = -indexend - 1
|
|
||||||
} else {
|
|
||||||
indexend++
|
|
||||||
}
|
|
||||||
rangelength := indexend - indexstart
|
|
||||||
answer := ac
|
|
||||||
copy(answer.content[indexstart:], ac.content[indexstart+rangelength:])
|
|
||||||
answer.content = answer.content[:ac.getCardinality()-rangelength]
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
// flip the values in the range [firstOfRange,endx)
|
|
||||||
func (ac *arrayContainer) not(firstOfRange, endx int) container {
|
|
||||||
if firstOfRange >= endx {
|
|
||||||
return ac.clone()
|
|
||||||
}
|
|
||||||
return ac.notClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// flip the values in the range [firstOfRange,lastOfRange]
|
|
||||||
func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container {
|
|
||||||
if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
|
|
||||||
return ac.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
// determine the span of array indices to be affected^M
|
|
||||||
startIndex := binarySearch(ac.content, uint16(firstOfRange))
|
|
||||||
if startIndex < 0 {
|
|
||||||
startIndex = -startIndex - 1
|
|
||||||
}
|
|
||||||
lastIndex := binarySearch(ac.content, uint16(lastOfRange))
|
|
||||||
if lastIndex < 0 {
|
|
||||||
lastIndex = -lastIndex - 2
|
|
||||||
}
|
|
||||||
currentValuesInRange := lastIndex - startIndex + 1
|
|
||||||
spanToBeFlipped := lastOfRange - firstOfRange + 1
|
|
||||||
newValuesInRange := spanToBeFlipped - currentValuesInRange
|
|
||||||
cardinalityChange := newValuesInRange - currentValuesInRange
|
|
||||||
newCardinality := len(ac.content) + cardinalityChange
|
|
||||||
if newCardinality > arrayDefaultMaxSize {
|
|
||||||
return ac.toBitmapContainer().not(firstOfRange, lastOfRange+1)
|
|
||||||
}
|
|
||||||
answer := newArrayContainer()
|
|
||||||
answer.content = make([]uint16, newCardinality, newCardinality) //a hack for sure
|
|
||||||
|
|
||||||
copy(answer.content, ac.content[:startIndex])
|
|
||||||
outPos := startIndex
|
|
||||||
inPos := startIndex
|
|
||||||
valInRange := firstOfRange
|
|
||||||
for ; valInRange <= lastOfRange && inPos <= lastIndex; valInRange++ {
|
|
||||||
if uint16(valInRange) != ac.content[inPos] {
|
|
||||||
answer.content[outPos] = uint16(valInRange)
|
|
||||||
outPos++
|
|
||||||
} else {
|
|
||||||
inPos++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for ; valInRange <= lastOfRange; valInRange++ {
|
|
||||||
answer.content[outPos] = uint16(valInRange)
|
|
||||||
outPos++
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := lastIndex + 1; i < len(ac.content); i++ {
|
|
||||||
answer.content[outPos] = ac.content[i]
|
|
||||||
outPos++
|
|
||||||
}
|
|
||||||
answer.content = answer.content[:newCardinality]
|
|
||||||
return answer
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) equals(o container) bool {
|
|
||||||
|
|
||||||
srb, ok := o.(*arrayContainer)
|
|
||||||
if ok {
|
|
||||||
// Check if the containers are the same object.
|
|
||||||
if ac == srb {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(srb.content) != len(ac.content) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, v := range ac.content {
|
|
||||||
if v != srb.content[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// use generic comparison
|
|
||||||
bCard := o.getCardinality()
|
|
||||||
aCard := ac.getCardinality()
|
|
||||||
if bCard != aCard {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
ait := ac.getShortIterator()
|
|
||||||
bit := o.getShortIterator()
|
|
||||||
for ait.hasNext() {
|
|
||||||
if bit.next() != ait.next() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) toBitmapContainer() *bitmapContainer {
|
|
||||||
bc := newBitmapContainer()
|
|
||||||
bc.loadData(ac)
|
|
||||||
return bc
|
|
||||||
|
|
||||||
}
|
|
||||||
func (ac *arrayContainer) iadd(x uint16) (wasNew bool) {
|
|
||||||
// Special case adding to the end of the container.
|
|
||||||
l := len(ac.content)
|
|
||||||
if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
|
|
||||||
ac.content = append(ac.content, x)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
loc := binarySearch(ac.content, x)
|
|
||||||
|
|
||||||
if loc < 0 {
|
|
||||||
s := ac.content
|
|
||||||
i := -loc - 1
|
|
||||||
s = append(s, 0)
|
|
||||||
copy(s[i+1:], s[i:])
|
|
||||||
s[i] = x
|
|
||||||
ac.content = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iaddReturnMinimized(x uint16) container {
|
|
||||||
// Special case adding to the end of the container.
|
|
||||||
l := len(ac.content)
|
|
||||||
if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
|
|
||||||
ac.content = append(ac.content, x)
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
loc := binarySearch(ac.content, x)
|
|
||||||
|
|
||||||
if loc < 0 {
|
|
||||||
if len(ac.content) >= arrayDefaultMaxSize {
|
|
||||||
a := ac.toBitmapContainer()
|
|
||||||
a.iadd(x)
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
s := ac.content
|
|
||||||
i := -loc - 1
|
|
||||||
s = append(s, 0)
|
|
||||||
copy(s[i+1:], s[i:])
|
|
||||||
s[i] = x
|
|
||||||
ac.content = s
|
|
||||||
}
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
// iremoveReturnMinimized is allowed to change the return type to minimize storage.
|
|
||||||
func (ac *arrayContainer) iremoveReturnMinimized(x uint16) container {
|
|
||||||
ac.iremove(x)
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iremove(x uint16) bool {
|
|
||||||
loc := binarySearch(ac.content, x)
|
|
||||||
if loc >= 0 {
|
|
||||||
s := ac.content
|
|
||||||
s = append(s[:loc], s[loc+1:]...)
|
|
||||||
ac.content = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) remove(x uint16) container {
|
|
||||||
out := &arrayContainer{make([]uint16, len(ac.content))}
|
|
||||||
copy(out.content, ac.content[:])
|
|
||||||
|
|
||||||
loc := binarySearch(out.content, x)
|
|
||||||
if loc >= 0 {
|
|
||||||
s := out.content
|
|
||||||
s = append(s[:loc], s[loc+1:]...)
|
|
||||||
out.content = s
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) or(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.orArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return x.orArray(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return x.clone()
|
|
||||||
}
|
|
||||||
return x.orArray(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) orCardinality(a container) int {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.orArrayCardinality(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return x.orArrayCardinality(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
return x.orArrayCardinality(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) ior(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.iorArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return a.(*bitmapContainer).orArray(ac)
|
|
||||||
//return ac.iorBitmap(x) // note: this does not make sense
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return x.clone()
|
|
||||||
}
|
|
||||||
return ac.iorRun16(x)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iorArray(value2 *arrayContainer) container {
|
|
||||||
value1 := ac
|
|
||||||
len1 := value1.getCardinality()
|
|
||||||
len2 := value2.getCardinality()
|
|
||||||
maxPossibleCardinality := len1 + len2
|
|
||||||
if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
|
|
||||||
bc := newBitmapContainer()
|
|
||||||
for k := 0; k < len(value2.content); k++ {
|
|
||||||
v := value2.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
for k := 0; k < len(ac.content); k++ {
|
|
||||||
v := ac.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
bc.cardinality = int(popcntSlice(bc.bitmap))
|
|
||||||
if bc.cardinality <= arrayDefaultMaxSize {
|
|
||||||
return bc.toArrayContainer()
|
|
||||||
}
|
|
||||||
return bc
|
|
||||||
}
|
|
||||||
if maxPossibleCardinality > cap(value1.content) {
|
|
||||||
newcontent := make([]uint16, 0, maxPossibleCardinality)
|
|
||||||
copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1])
|
|
||||||
ac.content = newcontent
|
|
||||||
} else {
|
|
||||||
copy(ac.content[len2:maxPossibleCardinality], ac.content[0:len1])
|
|
||||||
}
|
|
||||||
nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content)
|
|
||||||
ac.content = ac.content[:nl] // reslice to match actual used capacity
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: such code does not make practical sense, except for lazy evaluations
|
|
||||||
func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container {
|
|
||||||
bc1 := ac.toBitmapContainer()
|
|
||||||
bc1.iorBitmap(bc2)
|
|
||||||
*ac = *newArrayContainerFromBitmap(bc1)
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iorRun16(rc *runContainer16) container {
|
|
||||||
bc1 := ac.toBitmapContainer()
|
|
||||||
bc2 := rc.toBitmapContainer()
|
|
||||||
bc1.iorBitmap(bc2)
|
|
||||||
*ac = *newArrayContainerFromBitmap(bc1)
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyIOR(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.lazyIorArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return ac.lazyIorBitmap(x)
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return x.clone()
|
|
||||||
}
|
|
||||||
return ac.lazyIorRun16(x)
|
|
||||||
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyIorArray(ac2 *arrayContainer) container {
|
|
||||||
// TODO actually make this lazy
|
|
||||||
return ac.iorArray(ac2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyIorBitmap(bc *bitmapContainer) container {
|
|
||||||
// TODO actually make this lazy
|
|
||||||
return ac.iorBitmap(bc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyIorRun16(rc *runContainer16) container {
|
|
||||||
// TODO actually make this lazy
|
|
||||||
return ac.iorRun16(rc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyOR(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.lazyorArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return a.lazyOR(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return x.clone()
|
|
||||||
}
|
|
||||||
return x.orArray(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) orArray(value2 *arrayContainer) container {
|
|
||||||
value1 := ac
|
|
||||||
maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
|
|
||||||
if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
|
|
||||||
bc := newBitmapContainer()
|
|
||||||
for k := 0; k < len(value2.content); k++ {
|
|
||||||
v := value2.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
for k := 0; k < len(ac.content); k++ {
|
|
||||||
v := ac.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
bc.cardinality = int(popcntSlice(bc.bitmap))
|
|
||||||
if bc.cardinality <= arrayDefaultMaxSize {
|
|
||||||
return bc.toArrayContainer()
|
|
||||||
}
|
|
||||||
return bc
|
|
||||||
}
|
|
||||||
answer := newArrayContainerCapacity(maxPossibleCardinality)
|
|
||||||
nl := union2by2(value1.content, value2.content, answer.content)
|
|
||||||
answer.content = answer.content[:nl] // reslice to match actual used capacity
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int {
|
|
||||||
return union2by2Cardinality(ac.content, value2.content)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container {
|
|
||||||
value1 := ac
|
|
||||||
maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
|
|
||||||
if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!^M
|
|
||||||
bc := newBitmapContainer()
|
|
||||||
for k := 0; k < len(value2.content); k++ {
|
|
||||||
v := value2.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
for k := 0; k < len(ac.content); k++ {
|
|
||||||
v := ac.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
mask := uint64(1) << (v % 64)
|
|
||||||
bc.bitmap[i] |= mask
|
|
||||||
}
|
|
||||||
bc.cardinality = invalidCardinality
|
|
||||||
return bc
|
|
||||||
}
|
|
||||||
answer := newArrayContainerCapacity(maxPossibleCardinality)
|
|
||||||
nl := union2by2(value1.content, value2.content, answer.content)
|
|
||||||
answer.content = answer.content[:nl] // reslice to match actual used capacity
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) and(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.andArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return x.and(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return ac.clone()
|
|
||||||
}
|
|
||||||
return x.andArray(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andCardinality(a container) int {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.andArrayCardinality(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return x.andCardinality(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
return x.andArrayCardinality(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) intersects(a container) bool {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.intersectsArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return x.intersects(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
return x.intersects(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iand(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.iandArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return ac.iandBitmap(x)
|
|
||||||
case *runContainer16:
|
|
||||||
if x.isFull() {
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
return x.andArray(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandBitmap(bc *bitmapContainer) container {
|
|
||||||
pos := 0
|
|
||||||
c := ac.getCardinality()
|
|
||||||
for k := 0; k < c; k++ {
|
|
||||||
// branchless
|
|
||||||
v := ac.content[k]
|
|
||||||
ac.content[pos] = v
|
|
||||||
pos += int(bc.bitValue(v))
|
|
||||||
}
|
|
||||||
ac.content = ac.content[:pos]
|
|
||||||
return ac
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) xor(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.xorArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return a.xor(ac)
|
|
||||||
case *runContainer16:
|
|
||||||
return x.xorArray(ac)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) xorArray(value2 *arrayContainer) container {
|
|
||||||
value1 := ac
|
|
||||||
totalCardinality := value1.getCardinality() + value2.getCardinality()
|
|
||||||
if totalCardinality > arrayDefaultMaxSize { // it could be a bitmap!
|
|
||||||
bc := newBitmapContainer()
|
|
||||||
for k := 0; k < len(value2.content); k++ {
|
|
||||||
v := value2.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
bc.bitmap[i] ^= (uint64(1) << (v % 64))
|
|
||||||
}
|
|
||||||
for k := 0; k < len(ac.content); k++ {
|
|
||||||
v := ac.content[k]
|
|
||||||
i := uint(v) >> 6
|
|
||||||
bc.bitmap[i] ^= (uint64(1) << (v % 64))
|
|
||||||
}
|
|
||||||
bc.computeCardinality()
|
|
||||||
if bc.cardinality <= arrayDefaultMaxSize {
|
|
||||||
return bc.toArrayContainer()
|
|
||||||
}
|
|
||||||
return bc
|
|
||||||
}
|
|
||||||
desiredCapacity := totalCardinality
|
|
||||||
answer := newArrayContainerCapacity(desiredCapacity)
|
|
||||||
length := exclusiveUnion2by2(value1.content, value2.content, answer.content)
|
|
||||||
answer.content = answer.content[:length]
|
|
||||||
return answer
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andNot(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.andNotArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return ac.andNotBitmap(x)
|
|
||||||
case *runContainer16:
|
|
||||||
return ac.andNotRun16(x)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andNotRun16(rc *runContainer16) container {
|
|
||||||
acb := ac.toBitmapContainer()
|
|
||||||
rcb := rc.toBitmapContainer()
|
|
||||||
return acb.andNotBitmap(rcb)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandNot(a container) container {
|
|
||||||
switch x := a.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return ac.iandNotArray(x)
|
|
||||||
case *bitmapContainer:
|
|
||||||
return ac.iandNotBitmap(x)
|
|
||||||
case *runContainer16:
|
|
||||||
return ac.iandNotRun16(x)
|
|
||||||
}
|
|
||||||
panic("unsupported container type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandNotRun16(rc *runContainer16) container {
|
|
||||||
rcb := rc.toBitmapContainer()
|
|
||||||
acb := ac.toBitmapContainer()
|
|
||||||
acb.iandNotBitmapSurely(rcb)
|
|
||||||
*ac = *(acb.toArrayContainer())
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andNotArray(value2 *arrayContainer) container {
|
|
||||||
value1 := ac
|
|
||||||
desiredcapacity := value1.getCardinality()
|
|
||||||
answer := newArrayContainerCapacity(desiredcapacity)
|
|
||||||
length := difference(value1.content, value2.content, answer.content)
|
|
||||||
answer.content = answer.content[:length]
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandNotArray(value2 *arrayContainer) container {
|
|
||||||
length := difference(ac.content, value2.content, ac.content)
|
|
||||||
ac.content = ac.content[:length]
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andNotBitmap(value2 *bitmapContainer) container {
|
|
||||||
desiredcapacity := ac.getCardinality()
|
|
||||||
answer := newArrayContainerCapacity(desiredcapacity)
|
|
||||||
answer.content = answer.content[:desiredcapacity]
|
|
||||||
pos := 0
|
|
||||||
for _, v := range ac.content {
|
|
||||||
answer.content[pos] = v
|
|
||||||
pos += 1 - int(value2.bitValue(v))
|
|
||||||
}
|
|
||||||
answer.content = answer.content[:pos]
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andBitmap(value2 *bitmapContainer) container {
|
|
||||||
desiredcapacity := ac.getCardinality()
|
|
||||||
answer := newArrayContainerCapacity(desiredcapacity)
|
|
||||||
answer.content = answer.content[:desiredcapacity]
|
|
||||||
pos := 0
|
|
||||||
for _, v := range ac.content {
|
|
||||||
answer.content[pos] = v
|
|
||||||
pos += int(value2.bitValue(v))
|
|
||||||
}
|
|
||||||
answer.content = answer.content[:pos]
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandNotBitmap(value2 *bitmapContainer) container {
|
|
||||||
pos := 0
|
|
||||||
for _, v := range ac.content {
|
|
||||||
ac.content[pos] = v
|
|
||||||
pos += 1 - int(value2.bitValue(v))
|
|
||||||
}
|
|
||||||
ac.content = ac.content[:pos]
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
// copyOf returns a new slice of exactly size elements containing the
// first min(len(array), size) values of array; when size exceeds
// len(array) the tail is zero-filled. Mirrors Java's Arrays.copyOf.
// Uses the builtin copy (single memmove) instead of the original
// element-by-element loop with a manual break at i == size.
func copyOf(array []uint16, size int) []uint16 {
	result := make([]uint16, size)
	copy(result, array) // copies min(size, len(array)) elements
	return result
}
|
|
||||||
|
|
||||||
// flip the values in the range [firstOfRange,endx)
|
|
||||||
func (ac *arrayContainer) inot(firstOfRange, endx int) container {
|
|
||||||
if firstOfRange >= endx {
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
return ac.inotClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// inotClose flips, in place, all values in the closed range
// [firstOfRange, lastOfRange]. If the flipped result would exceed the
// array size limit, the work is done in a bitmap container, which is
// returned after ac is overwritten with its array form.
func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
	if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
		return ac
	}
	// determine the span of array indices to be affected
	startIndex := binarySearch(ac.content, uint16(firstOfRange))
	if startIndex < 0 {
		// absent: decode the insertion point from the negative encoding
		startIndex = -startIndex - 1
	}
	lastIndex := binarySearch(ac.content, uint16(lastOfRange))
	if lastIndex < 0 {
		// absent: index of the last stored value below lastOfRange
		lastIndex = -lastIndex - 1 - 1
	}
	currentValuesInRange := lastIndex - startIndex + 1
	spanToBeFlipped := lastOfRange - firstOfRange + 1

	// after flipping, exactly the currently-absent values remain present
	newValuesInRange := spanToBeFlipped - currentValuesInRange
	buffer := make([]uint16, newValuesInRange)
	cardinalityChange := newValuesInRange - currentValuesInRange
	newCardinality := len(ac.content) + cardinalityChange
	if cardinalityChange > 0 {
		if newCardinality > len(ac.content) {
			if newCardinality > arrayDefaultMaxSize {
				// result too large for an array: flip in a bitmap instead
				bcRet := ac.toBitmapContainer()
				bcRet.inot(firstOfRange, lastOfRange+1)
				*ac = *bcRet.toArrayContainer()
				return bcRet
			}
			ac.content = copyOf(ac.content, newCardinality)
		}
		// shift the tail right to make room for the extra values
		base := lastIndex + 1
		copy(ac.content[lastIndex+1+cardinalityChange:], ac.content[base:base+len(ac.content)-1-lastIndex])
		ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
	} else { // no expansion needed
		ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
		if cardinalityChange < 0 {
			// slide the tail left over the slots freed by the shrink
			for i := startIndex + newValuesInRange; i < newCardinality; i++ {
				ac.content[i] = ac.content[i-cardinalityChange]
			}
		}
	}
	ac.content = ac.content[:newCardinality]
	return ac
}
|
|
||||||
|
|
||||||
// negateRange computes the negation of the value range
// [startRange, lastRange) with respect to the currently stored values at
// indices [startIndex, lastIndex], first into buffer and then copied back
// over that region of ac.content. buffer must be sized exactly to the
// negated result; the caller (inotClose) guarantees this, and a mismatch
// panics as an internal bug.
func (ac *arrayContainer) negateRange(buffer []uint16, startIndex, lastIndex, startRange, lastRange int) {
	// compute the negation into buffer
	outPos := 0
	inPos := startIndex // value here always >= valInRange,
	// until it is exhausted
	// n.b., we can start initially exhausted.

	valInRange := startRange
	for ; valInRange < lastRange && inPos <= lastIndex; valInRange++ {
		if uint16(valInRange) != ac.content[inPos] {
			// valInRange is absent from ac, so it appears in the negation
			buffer[outPos] = uint16(valInRange)
			outPos++
		} else {
			inPos++
		}
	}

	// if there are extra items (greater than the biggest
	// pre-existing one in range), buffer them
	for ; valInRange < lastRange; valInRange++ {
		buffer[outPos] = uint16(valInRange)
		outPos++
	}

	if outPos != len(buffer) {
		panic("negateRange: internal bug")
	}

	// write the negated values back over the affected region
	for i, item := range buffer {
		ac.content[i+startIndex] = item
	}
}
|
|
||||||
|
|
||||||
// isFull reports whether the container holds every possible uint16
// value. Array containers are capped at arrayDefaultMaxSize entries
// (see toEfficientContainer), so this is always false.
func (ac *arrayContainer) isFull() bool {
	return false
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andArray(value2 *arrayContainer) container {
|
|
||||||
desiredcapacity := minOfInt(ac.getCardinality(), value2.getCardinality())
|
|
||||||
answer := newArrayContainerCapacity(desiredcapacity)
|
|
||||||
length := intersection2by2(
|
|
||||||
ac.content,
|
|
||||||
value2.content,
|
|
||||||
answer.content)
|
|
||||||
answer.content = answer.content[:length]
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) andArrayCardinality(value2 *arrayContainer) int {
|
|
||||||
return intersection2by2Cardinality(
|
|
||||||
ac.content,
|
|
||||||
value2.content)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) intersectsArray(value2 *arrayContainer) bool {
|
|
||||||
return intersects2by2(
|
|
||||||
ac.content,
|
|
||||||
value2.content)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) iandArray(value2 *arrayContainer) container {
|
|
||||||
length := intersection2by2(
|
|
||||||
ac.content,
|
|
||||||
value2.content,
|
|
||||||
ac.content)
|
|
||||||
ac.content = ac.content[:length]
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCardinality returns the number of stored values; the content slice
// holds exactly one entry per value.
func (ac *arrayContainer) getCardinality() int {
	return len(ac.content)
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) rank(x uint16) int {
|
|
||||||
answer := binarySearch(ac.content, x)
|
|
||||||
if answer >= 0 {
|
|
||||||
return answer + 1
|
|
||||||
}
|
|
||||||
return -answer - 1
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// selectInt returns the x-th smallest stored value (0-based); content is
// kept sorted, so this is a direct index.
func (ac *arrayContainer) selectInt(x uint16) int {
	return int(ac.content[x])
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) clone() container {
|
|
||||||
ptr := arrayContainer{make([]uint16, len(ac.content))}
|
|
||||||
copy(ptr.content, ac.content[:])
|
|
||||||
return &ptr
|
|
||||||
}
|
|
||||||
|
|
||||||
// contains reports whether x is present, via binary search over the
// sorted content slice.
func (ac *arrayContainer) contains(x uint16) bool {
	return binarySearch(ac.content, x) >= 0
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) loadData(bitmapContainer *bitmapContainer) {
|
|
||||||
ac.content = make([]uint16, bitmapContainer.cardinality, bitmapContainer.cardinality)
|
|
||||||
bitmapContainer.fillArray(ac.content)
|
|
||||||
}
|
|
||||||
// newArrayContainer returns an empty array container; the zero value
// (nil content slice) is immediately usable.
func newArrayContainer() *arrayContainer {
	p := new(arrayContainer)
	return p
}
|
|
||||||
|
|
||||||
func newArrayContainerFromBitmap(bc *bitmapContainer) *arrayContainer {
|
|
||||||
ac := &arrayContainer{}
|
|
||||||
ac.loadData(bc)
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
|
|
||||||
func newArrayContainerCapacity(size int) *arrayContainer {
|
|
||||||
p := new(arrayContainer)
|
|
||||||
p.content = make([]uint16, 0, size)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func newArrayContainerSize(size int) *arrayContainer {
|
|
||||||
p := new(arrayContainer)
|
|
||||||
p.content = make([]uint16, size, size)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func newArrayContainerRange(firstOfRun, lastOfRun int) *arrayContainer {
|
|
||||||
valuesInRange := lastOfRun - firstOfRun + 1
|
|
||||||
this := newArrayContainerCapacity(valuesInRange)
|
|
||||||
for i := 0; i < valuesInRange; i++ {
|
|
||||||
this.content = append(this.content, uint16(firstOfRun+i))
|
|
||||||
}
|
|
||||||
return this
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) numberOfRuns() (nr int) {
|
|
||||||
n := len(ac.content)
|
|
||||||
var runlen uint16
|
|
||||||
var cur, prev uint16
|
|
||||||
|
|
||||||
switch n {
|
|
||||||
case 0:
|
|
||||||
return 0
|
|
||||||
case 1:
|
|
||||||
return 1
|
|
||||||
default:
|
|
||||||
for i := 1; i < n; i++ {
|
|
||||||
prev = ac.content[i-1]
|
|
||||||
cur = ac.content[i]
|
|
||||||
|
|
||||||
if cur == prev+1 {
|
|
||||||
runlen++
|
|
||||||
} else {
|
|
||||||
if cur < prev {
|
|
||||||
panic("then fundamental arrayContainer assumption of sorted ac.content was broken")
|
|
||||||
}
|
|
||||||
if cur == prev {
|
|
||||||
panic("then fundamental arrayContainer assumption of deduplicated content was broken")
|
|
||||||
} else {
|
|
||||||
nr++
|
|
||||||
runlen = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nr++
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert to run or array *if needed*
|
|
||||||
func (ac *arrayContainer) toEfficientContainer() container {
|
|
||||||
|
|
||||||
numRuns := ac.numberOfRuns()
|
|
||||||
|
|
||||||
sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns)
|
|
||||||
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
|
|
||||||
card := ac.getCardinality()
|
|
||||||
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
|
|
||||||
|
|
||||||
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
|
|
||||||
return newRunContainer16FromArray(ac)
|
|
||||||
}
|
|
||||||
if card <= arrayDefaultMaxSize {
|
|
||||||
return ac
|
|
||||||
}
|
|
||||||
return ac.toBitmapContainer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// containerType identifies this container kind (array) for dispatch.
func (ac *arrayContainer) containerType() contype {
	return arrayContype
}
|
|
||||||
|
|
||||||
func (ac *arrayContainer) addOffset(x uint16) []container {
|
|
||||||
low := &arrayContainer{}
|
|
||||||
high := &arrayContainer{}
|
|
||||||
for _, val := range ac.content {
|
|
||||||
y := uint32(val) + uint32(x)
|
|
||||||
if highbits(y) > 0 {
|
|
||||||
high.content = append(high.content, lowbits(y))
|
|
||||||
} else {
|
|
||||||
low.content = append(low.content, lowbits(y))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return []container{low, high}
|
|
||||||
}
|
|
134
vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go
generated
vendored
134
vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go
generated
vendored
@ -1,134 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
// NOTE: THIS FILE WAS PRODUCED BY THE
|
|
||||||
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
|
|
||||||
// DO NOT EDIT
|
|
||||||
|
|
||||||
import "github.com/tinylib/msgp/msgp"
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
// Reads a msgpack map from dc; only the "content" key (a uint16 array)
// is recognized, any other key is skipped. Generated by msgp; do not
// hand-edit logic.
func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zbzg uint32
	zbzg, err = dc.ReadMapHeader()
	if err != nil {
		return
	}
	for zbzg > 0 {
		zbzg--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "content":
			var zbai uint32
			zbai, err = dc.ReadArrayHeader()
			if err != nil {
				return
			}
			// reuse the existing backing slice when large enough
			if cap(z.content) >= int(zbai) {
				z.content = (z.content)[:zbai]
			} else {
				z.content = make([]uint16, zbai)
			}
			for zxvk := range z.content {
				z.content[zxvk], err = dc.ReadUint16()
				if err != nil {
					return
				}
			}
		default:
			err = dc.Skip()
			if err != nil {
				return
			}
		}
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
// Writes the container as a one-entry msgpack map {"content": [uint16...]}.
func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 1
	// write "content"
	err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
	if err != nil {
		return err
	}
	err = en.WriteArrayHeader(uint32(len(z.content)))
	if err != nil {
		return
	}
	for zxvk := range z.content {
		err = en.WriteUint16(z.content[zxvk])
		if err != nil {
			return
		}
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
// Appends the msgpack encoding of the container to b and returns the
// extended slice.
func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 1
	// string "content"
	o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
	o = msgp.AppendArrayHeader(o, uint32(len(z.content)))
	for zxvk := range z.content {
		o = msgp.AppendUint16(o, z.content[zxvk])
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
// Byte-slice counterpart of DecodeMsg: consumes the encoding from bts
// and returns the remaining bytes in o.
func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zcmr uint32
	zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		return
	}
	for zcmr > 0 {
		zcmr--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "content":
			var zajw uint32
			zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				return
			}
			// reuse the existing backing slice when large enough
			if cap(z.content) >= int(zajw) {
				z.content = (z.content)[:zajw]
			} else {
				z.content = make([]uint16, zajw)
			}
			for zxvk := range z.content {
				z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts)
				if err != nil {
					return
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				return
			}
		}
	}
	o = bts
	return
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// (map header and "content" key overhead plus one uint16 per element).
func (z *arrayContainer) Msgsize() (s int) {
	s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size))
	return
}
|
|
1076
vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
generated
vendored
1076
vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
generated
vendored
File diff suppressed because it is too large
Load Diff
415
vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go
generated
vendored
415
vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go
generated
vendored
@ -1,415 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
// NOTE: THIS FILE WAS PRODUCED BY THE
|
|
||||||
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
|
|
||||||
// DO NOT EDIT
|
|
||||||
|
|
||||||
import "github.com/tinylib/msgp/msgp"
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
// Reads a msgpack map with keys "cardinality" (int) and "bitmap"
// (uint64 array); unknown keys are skipped. Generated by msgp.
func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zbzg uint32
	zbzg, err = dc.ReadMapHeader()
	if err != nil {
		return
	}
	for zbzg > 0 {
		zbzg--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "cardinality":
			z.cardinality, err = dc.ReadInt()
			if err != nil {
				return
			}
		case "bitmap":
			var zbai uint32
			zbai, err = dc.ReadArrayHeader()
			if err != nil {
				return
			}
			// reuse the existing backing slice when large enough
			if cap(z.bitmap) >= int(zbai) {
				z.bitmap = (z.bitmap)[:zbai]
			} else {
				z.bitmap = make([]uint64, zbai)
			}
			for zxvk := range z.bitmap {
				z.bitmap[zxvk], err = dc.ReadUint64()
				if err != nil {
					return
				}
			}
		default:
			err = dc.Skip()
			if err != nil {
				return
			}
		}
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
// Writes the container as a two-entry msgpack map: "cardinality" then
// "bitmap".
func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "cardinality"
	err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
	if err != nil {
		return err
	}
	err = en.WriteInt(z.cardinality)
	if err != nil {
		return
	}
	// write "bitmap"
	err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
	if err != nil {
		return err
	}
	err = en.WriteArrayHeader(uint32(len(z.bitmap)))
	if err != nil {
		return
	}
	for zxvk := range z.bitmap {
		err = en.WriteUint64(z.bitmap[zxvk])
		if err != nil {
			return
		}
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
// Appends the msgpack encoding of the container to b and returns the
// extended slice.
func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "cardinality"
	o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
	o = msgp.AppendInt(o, z.cardinality)
	// string "bitmap"
	o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
	o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap)))
	for zxvk := range z.bitmap {
		o = msgp.AppendUint64(o, z.bitmap[zxvk])
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
// Byte-slice counterpart of DecodeMsg: consumes the encoding from bts
// and returns the remaining bytes in o.
func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zcmr uint32
	zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		return
	}
	for zcmr > 0 {
		zcmr--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "cardinality":
			z.cardinality, bts, err = msgp.ReadIntBytes(bts)
			if err != nil {
				return
			}
		case "bitmap":
			var zajw uint32
			zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				return
			}
			// reuse the existing backing slice when large enough
			if cap(z.bitmap) >= int(zajw) {
				z.bitmap = (z.bitmap)[:zajw]
			} else {
				z.bitmap = make([]uint64, zajw)
			}
			for zxvk := range z.bitmap {
				z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts)
				if err != nil {
					return
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				return
			}
		}
	}
	o = bts
	return
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// (map header plus both key/value overheads and one uint64 per word).
func (z *bitmapContainer) Msgsize() (s int) {
	s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size))
	return
}
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
// Reads a msgpack map with keys "ptr" (a nested, possibly-nil
// bitmapContainer map) and "i" (int cursor); unknown keys at either
// nesting level are skipped. Generated by msgp.
func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zhct uint32
	zhct, err = dc.ReadMapHeader()
	if err != nil {
		return
	}
	for zhct > 0 {
		zhct--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "ptr":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					return
				}
				z.ptr = nil
			} else {
				// allocate the nested container lazily
				if z.ptr == nil {
					z.ptr = new(bitmapContainer)
				}
				var zcua uint32
				zcua, err = dc.ReadMapHeader()
				if err != nil {
					return
				}
				for zcua > 0 {
					zcua--
					field, err = dc.ReadMapKeyPtr()
					if err != nil {
						return
					}
					switch msgp.UnsafeString(field) {
					case "cardinality":
						z.ptr.cardinality, err = dc.ReadInt()
						if err != nil {
							return
						}
					case "bitmap":
						var zxhx uint32
						zxhx, err = dc.ReadArrayHeader()
						if err != nil {
							return
						}
						// reuse the existing backing slice when large enough
						if cap(z.ptr.bitmap) >= int(zxhx) {
							z.ptr.bitmap = (z.ptr.bitmap)[:zxhx]
						} else {
							z.ptr.bitmap = make([]uint64, zxhx)
						}
						for zwht := range z.ptr.bitmap {
							z.ptr.bitmap[zwht], err = dc.ReadUint64()
							if err != nil {
								return
							}
						}
					default:
						err = dc.Skip()
						if err != nil {
							return
						}
					}
				}
			}
		case "i":
			z.i, err = dc.ReadInt()
			if err != nil {
				return
			}
		default:
			err = dc.Skip()
			if err != nil {
				return
			}
		}
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
// Writes a two-entry msgpack map: "ptr" (nil or a nested
// bitmapContainer map) followed by the cursor "i".
func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "ptr"
	err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72)
	if err != nil {
		return err
	}
	if z.ptr == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		// map header, size 2
		// write "cardinality"
		err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
		if err != nil {
			return err
		}
		err = en.WriteInt(z.ptr.cardinality)
		if err != nil {
			return
		}
		// write "bitmap"
		err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
		if err != nil {
			return err
		}
		err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap)))
		if err != nil {
			return
		}
		for zwht := range z.ptr.bitmap {
			err = en.WriteUint64(z.ptr.bitmap[zwht])
			if err != nil {
				return
			}
		}
	}
	// write "i"
	err = en.Append(0xa1, 0x69)
	if err != nil {
		return err
	}
	err = en.WriteInt(z.i)
	if err != nil {
		return
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
// Appends the msgpack encoding of the iterator to b and returns the
// extended slice.
func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "ptr"
	o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72)
	if z.ptr == nil {
		o = msgp.AppendNil(o)
	} else {
		// map header, size 2
		// string "cardinality"
		o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
		o = msgp.AppendInt(o, z.ptr.cardinality)
		// string "bitmap"
		o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
		o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap)))
		for zwht := range z.ptr.bitmap {
			o = msgp.AppendUint64(o, z.ptr.bitmap[zwht])
		}
	}
	// string "i"
	o = append(o, 0xa1, 0x69)
	o = msgp.AppendInt(o, z.i)
	return
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
// Byte-slice counterpart of DecodeMsg: consumes the encoding from bts
// and returns the remaining bytes in o.
func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field // generated scaffolding retained by the msgp tool
	var zlqf uint32
	zlqf, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		return
	}
	for zlqf > 0 {
		zlqf--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			return
		}
		switch msgp.UnsafeString(field) {
		case "ptr":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.ptr = nil
			} else {
				// allocate the nested container lazily
				if z.ptr == nil {
					z.ptr = new(bitmapContainer)
				}
				var zdaf uint32
				zdaf, bts, err = msgp.ReadMapHeaderBytes(bts)
				if err != nil {
					return
				}
				for zdaf > 0 {
					zdaf--
					field, bts, err = msgp.ReadMapKeyZC(bts)
					if err != nil {
						return
					}
					switch msgp.UnsafeString(field) {
					case "cardinality":
						z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts)
						if err != nil {
							return
						}
					case "bitmap":
						var zpks uint32
						zpks, bts, err = msgp.ReadArrayHeaderBytes(bts)
						if err != nil {
							return
						}
						// reuse the existing backing slice when large enough
						if cap(z.ptr.bitmap) >= int(zpks) {
							z.ptr.bitmap = (z.ptr.bitmap)[:zpks]
						} else {
							z.ptr.bitmap = make([]uint64, zpks)
						}
						for zwht := range z.ptr.bitmap {
							z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts)
							if err != nil {
								return
							}
						}
					default:
						bts, err = msgp.Skip(bts)
						if err != nil {
							return
						}
					}
				}
			}
		case "i":
			z.i, bts, err = msgp.ReadIntBytes(bts)
			if err != nil {
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				return
			}
		}
	}
	o = bts
	return
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// (accounting for a nil versus populated nested container).
func (z *bitmapContainerShortIterator) Msgsize() (s int) {
	s = 1 + 4
	if z.ptr == nil {
		s += msgp.NilSize
	} else {
		s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size))
	}
	s += 2 + msgp.IntSize
	return
}
|
|
11
vendor/github.com/RoaringBitmap/roaring/clz.go
generated
vendored
11
vendor/github.com/RoaringBitmap/roaring/clz.go
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
// +build go1.9
|
|
||||||
// "go1.9", from Go version 1.9 onward
|
|
||||||
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import "math/bits"
|
|
||||||
|
|
||||||
// countLeadingZeros returns the number of consecutive most significant
// zero bits of x (64 for x == 0); thin wrapper over math/bits, used on
// go1.9+ builds.
func countLeadingZeros(x uint64) int {
	return bits.LeadingZeros64(x)
}
|
|
36
vendor/github.com/RoaringBitmap/roaring/clz_compat.go
generated
vendored
36
vendor/github.com/RoaringBitmap/roaring/clz_compat.go
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
// countLeadingZeros returns the number of consecutive most significant
// zero bits of i (64 when i == 0). Pre-go1.9 fallback for
// bits.LeadingZeros64: it narrows the search by halves, shifting the
// surviving bits left and accumulating the zero count.
func countLeadingZeros(i uint64) int {
	if i == 0 {
		return 64
	}
	zeros := 1
	hi := uint32(i >> 32)
	if hi == 0 {
		zeros += 32
		hi = uint32(i)
	}
	if hi>>16 == 0 {
		zeros += 16
		hi <<= 16
	}
	if hi>>24 == 0 {
		zeros += 8
		hi <<= 8
	}
	if hi>>28 == 0 {
		zeros += 4
		hi <<= 4
	}
	if hi>>30 == 0 {
		zeros += 2
		hi <<= 2
	}
	// subtract 1 when the top bit is already set (zeros started at 1)
	zeros -= int(hi >> 31)
	return zeros
}
|
|
11
vendor/github.com/RoaringBitmap/roaring/ctz.go
generated
vendored
11
vendor/github.com/RoaringBitmap/roaring/ctz.go
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
// +build go1.9
|
|
||||||
// "go1.9", from Go version 1.9 onward
|
|
||||||
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import "math/bits"
|
|
||||||
|
|
||||||
func countTrailingZeros(x uint64) int {
|
|
||||||
return bits.TrailingZeros64(x)
|
|
||||||
}
|
|
71
vendor/github.com/RoaringBitmap/roaring/ctz_compat.go
generated
vendored
71
vendor/github.com/RoaringBitmap/roaring/ctz_compat.go
generated
vendored
@ -1,71 +0,0 @@
|
|||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
// Reuse of portions of go/src/math/big standard lib code
|
|
||||||
// under this license:
|
|
||||||
/*
|
|
||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
const deBruijn32 = 0x077CB531
|
|
||||||
|
|
||||||
var deBruijn32Lookup = []byte{
|
|
||||||
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
|
|
||||||
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
|
|
||||||
}
|
|
||||||
|
|
||||||
const deBruijn64 = 0x03f79d71b4ca8b09
|
|
||||||
|
|
||||||
var deBruijn64Lookup = []byte{
|
|
||||||
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
|
|
||||||
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
|
|
||||||
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
|
|
||||||
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
|
|
||||||
}
|
|
||||||
|
|
||||||
// trailingZeroBits returns the number of consecutive least significant zero
|
|
||||||
// bits of x.
|
|
||||||
func countTrailingZeros(x uint64) int {
|
|
||||||
// x & -x leaves only the right-most bit set in the word. Let k be the
|
|
||||||
// index of that bit. Since only a single bit is set, the value is two
|
|
||||||
// to the power of k. Multiplying by a power of two is equivalent to
|
|
||||||
// left shifting, in this case by k bits. The de Bruijn constant is
|
|
||||||
// such that all six bit, consecutive substrings are distinct.
|
|
||||||
// Therefore, if we have a left shifted version of this constant we can
|
|
||||||
// find by how many bits it was shifted by looking at which six bit
|
|
||||||
// substring ended up at the top of the word.
|
|
||||||
// (Knuth, volume 4, section 7.3.1)
|
|
||||||
if x == 0 {
|
|
||||||
// We have to special case 0; the fomula
|
|
||||||
// below doesn't work for 0.
|
|
||||||
return 64
|
|
||||||
}
|
|
||||||
return int(deBruijn64Lookup[((x&-x)*(deBruijn64))>>58])
|
|
||||||
}
|
|
215
vendor/github.com/RoaringBitmap/roaring/fastaggregation.go
generated
vendored
215
vendor/github.com/RoaringBitmap/roaring/fastaggregation.go
generated
vendored
@ -1,215 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/heap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Or function that requires repairAfterLazy
|
|
||||||
func lazyOR(x1, x2 *Bitmap) *Bitmap {
|
|
||||||
answer := NewBitmap()
|
|
||||||
pos1 := 0
|
|
||||||
pos2 := 0
|
|
||||||
length1 := x1.highlowcontainer.size()
|
|
||||||
length2 := x2.highlowcontainer.size()
|
|
||||||
main:
|
|
||||||
for (pos1 < length1) && (pos2 < length2) {
|
|
||||||
s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
|
|
||||||
pos1++
|
|
||||||
if pos1 == length1 {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
} else if s1 > s2 {
|
|
||||||
answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2)
|
|
||||||
pos2++
|
|
||||||
if pos2 == length2 {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
} else {
|
|
||||||
c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
|
|
||||||
switch t := c1.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
c1 = t.toBitmapContainer()
|
|
||||||
case *runContainer16:
|
|
||||||
if !t.isFull() {
|
|
||||||
c1 = t.toBitmapContainer()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
|
|
||||||
pos1++
|
|
||||||
pos2++
|
|
||||||
if (pos1 == length1) || (pos2 == length2) {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pos1 == length1 {
|
|
||||||
answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
|
|
||||||
} else if pos2 == length2 {
|
|
||||||
answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
|
|
||||||
}
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
// In-place Or function that requires repairAfterLazy
|
|
||||||
func (x1 *Bitmap) lazyOR(x2 *Bitmap) *Bitmap {
|
|
||||||
pos1 := 0
|
|
||||||
pos2 := 0
|
|
||||||
length1 := x1.highlowcontainer.size()
|
|
||||||
length2 := x2.highlowcontainer.size()
|
|
||||||
main:
|
|
||||||
for (pos1 < length1) && (pos2 < length2) {
|
|
||||||
s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
pos1++
|
|
||||||
if pos1 == length1 {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
} else if s1 > s2 {
|
|
||||||
x1.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone())
|
|
||||||
pos2++
|
|
||||||
pos1++
|
|
||||||
length1++
|
|
||||||
if pos2 == length2 {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
} else {
|
|
||||||
c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
|
|
||||||
switch t := c1.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
c1 = t.toBitmapContainer()
|
|
||||||
case *runContainer16:
|
|
||||||
if !t.isFull() {
|
|
||||||
c1 = t.toBitmapContainer()
|
|
||||||
}
|
|
||||||
case *bitmapContainer:
|
|
||||||
c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1)
|
|
||||||
}
|
|
||||||
|
|
||||||
x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2))
|
|
||||||
x1.highlowcontainer.needCopyOnWrite[pos1] = false
|
|
||||||
pos1++
|
|
||||||
pos2++
|
|
||||||
if (pos1 == length1) || (pos2 == length2) {
|
|
||||||
break main
|
|
||||||
}
|
|
||||||
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
|
|
||||||
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pos1 == length1 {
|
|
||||||
x1.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
|
|
||||||
}
|
|
||||||
return x1
|
|
||||||
}
|
|
||||||
|
|
||||||
// to be called after lazy aggregates
|
|
||||||
func (x1 *Bitmap) repairAfterLazy() {
|
|
||||||
for pos := 0; pos < x1.highlowcontainer.size(); pos++ {
|
|
||||||
c := x1.highlowcontainer.getContainerAtIndex(pos)
|
|
||||||
switch c.(type) {
|
|
||||||
case *bitmapContainer:
|
|
||||||
if c.(*bitmapContainer).cardinality == invalidCardinality {
|
|
||||||
c = x1.highlowcontainer.getWritableContainerAtIndex(pos)
|
|
||||||
c.(*bitmapContainer).computeCardinality()
|
|
||||||
if c.(*bitmapContainer).getCardinality() <= arrayDefaultMaxSize {
|
|
||||||
x1.highlowcontainer.setContainerAtIndex(pos, c.(*bitmapContainer).toArrayContainer())
|
|
||||||
} else if c.(*bitmapContainer).isFull() {
|
|
||||||
x1.highlowcontainer.setContainerAtIndex(pos, newRunContainer16Range(0, MaxUint16))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FastAnd computes the intersection between many bitmaps quickly
|
|
||||||
// Compared to the And function, it can take many bitmaps as input, thus saving the trouble
|
|
||||||
// of manually calling "And" many times.
|
|
||||||
func FastAnd(bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
if len(bitmaps) == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
} else if len(bitmaps) == 1 {
|
|
||||||
return bitmaps[0].Clone()
|
|
||||||
}
|
|
||||||
answer := And(bitmaps[0], bitmaps[1])
|
|
||||||
for _, bm := range bitmaps[2:] {
|
|
||||||
answer.And(bm)
|
|
||||||
}
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
// FastOr computes the union between many bitmaps quickly, as opposed to having to call Or repeatedly.
|
|
||||||
// It might also be faster than calling Or repeatedly.
|
|
||||||
func FastOr(bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
if len(bitmaps) == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
} else if len(bitmaps) == 1 {
|
|
||||||
return bitmaps[0].Clone()
|
|
||||||
}
|
|
||||||
answer := lazyOR(bitmaps[0], bitmaps[1])
|
|
||||||
for _, bm := range bitmaps[2:] {
|
|
||||||
answer = answer.lazyOR(bm)
|
|
||||||
}
|
|
||||||
// here is where repairAfterLazy is called.
|
|
||||||
answer.repairAfterLazy()
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeapOr computes the union between many bitmaps quickly using a heap.
|
|
||||||
// It might be faster than calling Or repeatedly.
|
|
||||||
func HeapOr(bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
if len(bitmaps) == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
}
|
|
||||||
// TODO: for better speed, we could do the operation lazily, see Java implementation
|
|
||||||
pq := make(priorityQueue, len(bitmaps))
|
|
||||||
for i, bm := range bitmaps {
|
|
||||||
pq[i] = &item{bm, i}
|
|
||||||
}
|
|
||||||
heap.Init(&pq)
|
|
||||||
|
|
||||||
for pq.Len() > 1 {
|
|
||||||
x1 := heap.Pop(&pq).(*item)
|
|
||||||
x2 := heap.Pop(&pq).(*item)
|
|
||||||
heap.Push(&pq, &item{Or(x1.value, x2.value), 0})
|
|
||||||
}
|
|
||||||
return heap.Pop(&pq).(*item).value
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeapXor computes the symmetric difference between many bitmaps quickly (as opposed to calling Xor repeated).
|
|
||||||
// Internally, this function uses a heap.
|
|
||||||
// It might be faster than calling Xor repeatedly.
|
|
||||||
func HeapXor(bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
if len(bitmaps) == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
}
|
|
||||||
|
|
||||||
pq := make(priorityQueue, len(bitmaps))
|
|
||||||
for i, bm := range bitmaps {
|
|
||||||
pq[i] = &item{bm, i}
|
|
||||||
}
|
|
||||||
heap.Init(&pq)
|
|
||||||
|
|
||||||
for pq.Len() > 1 {
|
|
||||||
x1 := heap.Pop(&pq).(*item)
|
|
||||||
x2 := heap.Pop(&pq).(*item)
|
|
||||||
heap.Push(&pq, &item{Xor(x1.value, x2.value), 0})
|
|
||||||
}
|
|
||||||
return heap.Pop(&pq).(*item).value
|
|
||||||
}
|
|
23
vendor/github.com/RoaringBitmap/roaring/manyiterator.go
generated
vendored
23
vendor/github.com/RoaringBitmap/roaring/manyiterator.go
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
type manyIterable interface {
|
|
||||||
nextMany(hs uint32, buf []uint32) int
|
|
||||||
}
|
|
||||||
|
|
||||||
type manyIterator struct {
|
|
||||||
slice []uint16
|
|
||||||
loc int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si *manyIterator) nextMany(hs uint32, buf []uint32) int {
|
|
||||||
n := 0
|
|
||||||
l := si.loc
|
|
||||||
s := si.slice
|
|
||||||
for n < len(buf) && l < len(s) {
|
|
||||||
buf[n] = uint32(s[l]) | hs
|
|
||||||
l++
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
si.loc = l
|
|
||||||
return n
|
|
||||||
}
|
|
613
vendor/github.com/RoaringBitmap/roaring/parallel.go
generated
vendored
613
vendor/github.com/RoaringBitmap/roaring/parallel.go
generated
vendored
@ -1,613 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/heap"
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultWorkerCount = runtime.NumCPU()
|
|
||||||
|
|
||||||
type bitmapContainerKey struct {
|
|
||||||
key uint16
|
|
||||||
idx int
|
|
||||||
bitmap *Bitmap
|
|
||||||
}
|
|
||||||
|
|
||||||
type multipleContainers struct {
|
|
||||||
key uint16
|
|
||||||
containers []container
|
|
||||||
idx int
|
|
||||||
}
|
|
||||||
|
|
||||||
type keyedContainer struct {
|
|
||||||
key uint16
|
|
||||||
container container
|
|
||||||
idx int
|
|
||||||
}
|
|
||||||
|
|
||||||
type bitmapContainerHeap []bitmapContainerKey
|
|
||||||
|
|
||||||
func (h bitmapContainerHeap) Len() int { return len(h) }
|
|
||||||
func (h bitmapContainerHeap) Less(i, j int) bool { return h[i].key < h[j].key }
|
|
||||||
func (h bitmapContainerHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
|
||||||
|
|
||||||
func (h *bitmapContainerHeap) Push(x interface{}) {
|
|
||||||
// Push and Pop use pointer receivers because they modify the slice's length,
|
|
||||||
// not just its contents.
|
|
||||||
*h = append(*h, x.(bitmapContainerKey))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *bitmapContainerHeap) Pop() interface{} {
|
|
||||||
old := *h
|
|
||||||
n := len(old)
|
|
||||||
x := old[n-1]
|
|
||||||
*h = old[0 : n-1]
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h bitmapContainerHeap) Peek() bitmapContainerKey {
|
|
||||||
return h[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *bitmapContainerHeap) popIncrementing() (key uint16, container container) {
|
|
||||||
k := h.Peek()
|
|
||||||
key = k.key
|
|
||||||
container = k.bitmap.highlowcontainer.containers[k.idx]
|
|
||||||
|
|
||||||
newIdx := k.idx + 1
|
|
||||||
if newIdx < k.bitmap.highlowcontainer.size() {
|
|
||||||
k = bitmapContainerKey{
|
|
||||||
k.bitmap.highlowcontainer.keys[newIdx],
|
|
||||||
newIdx,
|
|
||||||
k.bitmap,
|
|
||||||
}
|
|
||||||
(*h)[0] = k
|
|
||||||
heap.Fix(h, 0)
|
|
||||||
} else {
|
|
||||||
heap.Pop(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *bitmapContainerHeap) Next(containers []container) multipleContainers {
|
|
||||||
if h.Len() == 0 {
|
|
||||||
return multipleContainers{}
|
|
||||||
}
|
|
||||||
|
|
||||||
key, container := h.popIncrementing()
|
|
||||||
containers = append(containers, container)
|
|
||||||
|
|
||||||
for h.Len() > 0 && key == h.Peek().key {
|
|
||||||
_, container = h.popIncrementing()
|
|
||||||
containers = append(containers, container)
|
|
||||||
}
|
|
||||||
|
|
||||||
return multipleContainers{
|
|
||||||
key,
|
|
||||||
containers,
|
|
||||||
-1,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBitmapContainerHeap(bitmaps ...*Bitmap) bitmapContainerHeap {
|
|
||||||
// Initialize heap
|
|
||||||
var h bitmapContainerHeap = make([]bitmapContainerKey, 0, len(bitmaps))
|
|
||||||
for _, bitmap := range bitmaps {
|
|
||||||
if !bitmap.IsEmpty() {
|
|
||||||
key := bitmapContainerKey{
|
|
||||||
bitmap.highlowcontainer.keys[0],
|
|
||||||
0,
|
|
||||||
bitmap,
|
|
||||||
}
|
|
||||||
h = append(h, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
heap.Init(&h)
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func repairAfterLazy(c container) container {
|
|
||||||
switch t := c.(type) {
|
|
||||||
case *bitmapContainer:
|
|
||||||
if t.cardinality == invalidCardinality {
|
|
||||||
t.computeCardinality()
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.getCardinality() <= arrayDefaultMaxSize {
|
|
||||||
return t.toArrayContainer()
|
|
||||||
} else if c.(*bitmapContainer).isFull() {
|
|
||||||
return newRunContainer16Range(0, MaxUint16)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBitmapContainer(c container) container {
|
|
||||||
switch t := c.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
return t.toBitmapContainer()
|
|
||||||
case *runContainer16:
|
|
||||||
if !t.isFull() {
|
|
||||||
return t.toBitmapContainer()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) {
|
|
||||||
expectedKeys := -1
|
|
||||||
appendedKeys := 0
|
|
||||||
var keys []uint16
|
|
||||||
var containers []container
|
|
||||||
for appendedKeys != expectedKeys {
|
|
||||||
select {
|
|
||||||
case item := <-resultChan:
|
|
||||||
if len(keys) <= item.idx {
|
|
||||||
keys = append(keys, make([]uint16, item.idx-len(keys)+1)...)
|
|
||||||
containers = append(containers, make([]container, item.idx-len(containers)+1)...)
|
|
||||||
}
|
|
||||||
keys[item.idx] = item.key
|
|
||||||
containers[item.idx] = item.container
|
|
||||||
|
|
||||||
appendedKeys++
|
|
||||||
case msg := <-expectedKeysChan:
|
|
||||||
expectedKeys = msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
answer := &Bitmap{
|
|
||||||
roaringArray{
|
|
||||||
make([]uint16, 0, expectedKeys),
|
|
||||||
make([]container, 0, expectedKeys),
|
|
||||||
make([]bool, 0, expectedKeys),
|
|
||||||
false,
|
|
||||||
nil,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for i := range keys {
|
|
||||||
if containers[i] != nil { // in case a resulting container was empty, see ParAnd function
|
|
||||||
answer.highlowcontainer.appendContainer(keys[i], containers[i], false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bitmapChan <- answer
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParHeapOr computes the union (OR) of all provided bitmaps in parallel,
|
|
||||||
// where the parameter "parallelism" determines how many workers are to be used
|
|
||||||
// (if it is set to 0, a default number of workers is chosen)
|
|
||||||
// ParHeapOr uses a heap to compute the union. For rare cases it might be faster than ParOr
|
|
||||||
func ParHeapOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
|
|
||||||
bitmapCount := len(bitmaps)
|
|
||||||
if bitmapCount == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
} else if bitmapCount == 1 {
|
|
||||||
return bitmaps[0].Clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
if parallelism == 0 {
|
|
||||||
parallelism = defaultWorkerCount
|
|
||||||
}
|
|
||||||
|
|
||||||
h := newBitmapContainerHeap(bitmaps...)
|
|
||||||
|
|
||||||
bitmapChan := make(chan *Bitmap)
|
|
||||||
inputChan := make(chan multipleContainers, 128)
|
|
||||||
resultChan := make(chan keyedContainer, 32)
|
|
||||||
expectedKeysChan := make(chan int)
|
|
||||||
|
|
||||||
pool := sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return make([]container, 0, len(bitmaps))
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
orFunc := func() {
|
|
||||||
// Assumes only structs with >=2 containers are passed
|
|
||||||
for input := range inputChan {
|
|
||||||
c := toBitmapContainer(input.containers[0]).lazyOR(input.containers[1])
|
|
||||||
for _, next := range input.containers[2:] {
|
|
||||||
c = c.lazyIOR(next)
|
|
||||||
}
|
|
||||||
c = repairAfterLazy(c)
|
|
||||||
kx := keyedContainer{
|
|
||||||
input.key,
|
|
||||||
c,
|
|
||||||
input.idx,
|
|
||||||
}
|
|
||||||
resultChan <- kx
|
|
||||||
pool.Put(input.containers[:0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
|
|
||||||
|
|
||||||
for i := 0; i < parallelism; i++ {
|
|
||||||
go orFunc()
|
|
||||||
}
|
|
||||||
|
|
||||||
idx := 0
|
|
||||||
for h.Len() > 0 {
|
|
||||||
ck := h.Next(pool.Get().([]container))
|
|
||||||
if len(ck.containers) == 1 {
|
|
||||||
resultChan <- keyedContainer{
|
|
||||||
ck.key,
|
|
||||||
ck.containers[0],
|
|
||||||
idx,
|
|
||||||
}
|
|
||||||
pool.Put(ck.containers[:0])
|
|
||||||
} else {
|
|
||||||
ck.idx = idx
|
|
||||||
inputChan <- ck
|
|
||||||
}
|
|
||||||
idx++
|
|
||||||
}
|
|
||||||
expectedKeysChan <- idx
|
|
||||||
|
|
||||||
bitmap := <-bitmapChan
|
|
||||||
|
|
||||||
close(inputChan)
|
|
||||||
close(resultChan)
|
|
||||||
close(expectedKeysChan)
|
|
||||||
|
|
||||||
return bitmap
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParAnd computes the intersection (AND) of all provided bitmaps in parallel,
|
|
||||||
// where the parameter "parallelism" determines how many workers are to be used
|
|
||||||
// (if it is set to 0, a default number of workers is chosen)
|
|
||||||
func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
bitmapCount := len(bitmaps)
|
|
||||||
if bitmapCount == 0 {
|
|
||||||
return NewBitmap()
|
|
||||||
} else if bitmapCount == 1 {
|
|
||||||
return bitmaps[0].Clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
if parallelism == 0 {
|
|
||||||
parallelism = defaultWorkerCount
|
|
||||||
}
|
|
||||||
|
|
||||||
h := newBitmapContainerHeap(bitmaps...)
|
|
||||||
|
|
||||||
bitmapChan := make(chan *Bitmap)
|
|
||||||
inputChan := make(chan multipleContainers, 128)
|
|
||||||
resultChan := make(chan keyedContainer, 32)
|
|
||||||
expectedKeysChan := make(chan int)
|
|
||||||
|
|
||||||
andFunc := func() {
|
|
||||||
// Assumes only structs with >=2 containers are passed
|
|
||||||
for input := range inputChan {
|
|
||||||
c := input.containers[0].and(input.containers[1])
|
|
||||||
for _, next := range input.containers[2:] {
|
|
||||||
if c.getCardinality() == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c = c.iand(next)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a nil explicitly if the result of the intersection is an empty container
|
|
||||||
if c.getCardinality() == 0 {
|
|
||||||
c = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
kx := keyedContainer{
|
|
||||||
input.key,
|
|
||||||
c,
|
|
||||||
input.idx,
|
|
||||||
}
|
|
||||||
resultChan <- kx
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
|
|
||||||
|
|
||||||
for i := 0; i < parallelism; i++ {
|
|
||||||
go andFunc()
|
|
||||||
}
|
|
||||||
|
|
||||||
idx := 0
|
|
||||||
for h.Len() > 0 {
|
|
||||||
ck := h.Next(make([]container, 0, 4))
|
|
||||||
if len(ck.containers) == bitmapCount {
|
|
||||||
ck.idx = idx
|
|
||||||
inputChan <- ck
|
|
||||||
idx++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
expectedKeysChan <- idx
|
|
||||||
|
|
||||||
bitmap := <-bitmapChan
|
|
||||||
|
|
||||||
close(inputChan)
|
|
||||||
close(resultChan)
|
|
||||||
close(expectedKeysChan)
|
|
||||||
|
|
||||||
return bitmap
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParOr computes the union (OR) of all provided bitmaps in parallel,
|
|
||||||
// where the parameter "parallelism" determines how many workers are to be used
|
|
||||||
// (if it is set to 0, a default number of workers is chosen)
|
|
||||||
func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
|
|
||||||
var lKey uint16 = MaxUint16
|
|
||||||
var hKey uint16
|
|
||||||
|
|
||||||
bitmapsFiltered := bitmaps[:0]
|
|
||||||
for _, b := range bitmaps {
|
|
||||||
if !b.IsEmpty() {
|
|
||||||
bitmapsFiltered = append(bitmapsFiltered, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bitmaps = bitmapsFiltered
|
|
||||||
|
|
||||||
for _, b := range bitmaps {
|
|
||||||
lKey = minOfUint16(lKey, b.highlowcontainer.keys[0])
|
|
||||||
hKey = maxOfUint16(hKey, b.highlowcontainer.keys[b.highlowcontainer.size()-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
if lKey == MaxUint16 && hKey == 0 {
|
|
||||||
return New()
|
|
||||||
} else if len(bitmaps) == 1 {
|
|
||||||
return bitmaps[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
keyRange := hKey - lKey + 1
|
|
||||||
if keyRange == 1 {
|
|
||||||
// revert to FastOr. Since the key range is 0
|
|
||||||
// no container-level aggregation parallelism is achievable
|
|
||||||
return FastOr(bitmaps...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if parallelism == 0 {
|
|
||||||
parallelism = defaultWorkerCount
|
|
||||||
}
|
|
||||||
|
|
||||||
var chunkSize int
|
|
||||||
var chunkCount int
|
|
||||||
if parallelism*4 > int(keyRange) {
|
|
||||||
chunkSize = 1
|
|
||||||
chunkCount = int(keyRange)
|
|
||||||
} else {
|
|
||||||
chunkCount = parallelism * 4
|
|
||||||
chunkSize = (int(keyRange) + chunkCount - 1) / chunkCount
|
|
||||||
}
|
|
||||||
|
|
||||||
if chunkCount*chunkSize < int(keyRange) {
|
|
||||||
// it's fine to panic to indicate an implementation error
|
|
||||||
panic(fmt.Sprintf("invariant check failed: chunkCount * chunkSize < keyRange, %d * %d < %d", chunkCount, chunkSize, keyRange))
|
|
||||||
}
|
|
||||||
|
|
||||||
chunks := make([]*roaringArray, chunkCount)
|
|
||||||
|
|
||||||
chunkSpecChan := make(chan parChunkSpec, minOfInt(maxOfInt(64, 2*parallelism), int(chunkCount)))
|
|
||||||
chunkChan := make(chan parChunk, minOfInt(32, int(chunkCount)))
|
|
||||||
|
|
||||||
orFunc := func() {
|
|
||||||
for spec := range chunkSpecChan {
|
|
||||||
ra := lazyOrOnRange(&bitmaps[0].highlowcontainer, &bitmaps[1].highlowcontainer, spec.start, spec.end)
|
|
||||||
for _, b := range bitmaps[2:] {
|
|
||||||
ra = lazyIOrOnRange(ra, &b.highlowcontainer, spec.start, spec.end)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, c := range ra.containers {
|
|
||||||
ra.containers[i] = repairAfterLazy(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunkChan <- parChunk{ra, spec.idx}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < parallelism; i++ {
|
|
||||||
go orFunc()
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for i := 0; i < chunkCount; i++ {
|
|
||||||
spec := parChunkSpec{
|
|
||||||
start: uint16(int(lKey) + i*chunkSize),
|
|
||||||
end: uint16(minOfInt(int(lKey)+(i+1)*chunkSize-1, int(hKey))),
|
|
||||||
idx: int(i),
|
|
||||||
}
|
|
||||||
chunkSpecChan <- spec
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
chunksRemaining := chunkCount
|
|
||||||
for chunk := range chunkChan {
|
|
||||||
chunks[chunk.idx] = chunk.ra
|
|
||||||
chunksRemaining--
|
|
||||||
if chunksRemaining == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
close(chunkChan)
|
|
||||||
close(chunkSpecChan)
|
|
||||||
|
|
||||||
containerCount := 0
|
|
||||||
for _, chunk := range chunks {
|
|
||||||
containerCount += chunk.size()
|
|
||||||
}
|
|
||||||
|
|
||||||
result := Bitmap{
|
|
||||||
roaringArray{
|
|
||||||
containers: make([]container, containerCount),
|
|
||||||
keys: make([]uint16, containerCount),
|
|
||||||
needCopyOnWrite: make([]bool, containerCount),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
resultOffset := 0
|
|
||||||
for _, chunk := range chunks {
|
|
||||||
copy(result.highlowcontainer.containers[resultOffset:], chunk.containers)
|
|
||||||
copy(result.highlowcontainer.keys[resultOffset:], chunk.keys)
|
|
||||||
copy(result.highlowcontainer.needCopyOnWrite[resultOffset:], chunk.needCopyOnWrite)
|
|
||||||
resultOffset += chunk.size()
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result
|
|
||||||
}
|
|
||||||
|
|
||||||
type parChunkSpec struct {
|
|
||||||
start uint16
|
|
||||||
end uint16
|
|
||||||
idx int
|
|
||||||
}
|
|
||||||
|
|
||||||
type parChunk struct {
|
|
||||||
ra *roaringArray
|
|
||||||
idx int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c parChunk) size() int {
|
|
||||||
return c.ra.size()
|
|
||||||
}
|
|
||||||
|
|
||||||
func parNaiveStartAt(ra *roaringArray, start uint16, last uint16) int {
|
|
||||||
for idx, key := range ra.keys {
|
|
||||||
if key >= start && key <= last {
|
|
||||||
return idx
|
|
||||||
} else if key > last {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ra.size()
|
|
||||||
}
|
|
||||||
|
|
||||||
func lazyOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
|
|
||||||
answer := newRoaringArray()
|
|
||||||
length1 := ra1.size()
|
|
||||||
length2 := ra2.size()
|
|
||||||
|
|
||||||
idx1 := parNaiveStartAt(ra1, start, last)
|
|
||||||
idx2 := parNaiveStartAt(ra2, start, last)
|
|
||||||
|
|
||||||
var key1 uint16
|
|
||||||
var key2 uint16
|
|
||||||
if idx1 < length1 && idx2 < length2 {
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
|
|
||||||
for key1 <= last && key2 <= last {
|
|
||||||
|
|
||||||
if key1 < key2 {
|
|
||||||
answer.appendCopy(*ra1, idx1)
|
|
||||||
idx1++
|
|
||||||
if idx1 == length1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
} else if key1 > key2 {
|
|
||||||
answer.appendCopy(*ra2, idx2)
|
|
||||||
idx2++
|
|
||||||
if idx2 == length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
} else {
|
|
||||||
c1 := ra1.getFastContainerAtIndex(idx1, false)
|
|
||||||
|
|
||||||
answer.appendContainer(key1, c1.lazyOR(ra2.getContainerAtIndex(idx2)), false)
|
|
||||||
idx1++
|
|
||||||
idx2++
|
|
||||||
if idx1 == length1 || idx2 == length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if idx2 < length2 {
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
for key2 <= last {
|
|
||||||
answer.appendCopy(*ra2, idx2)
|
|
||||||
idx2++
|
|
||||||
if idx2 == length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if idx1 < length1 {
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
for key1 <= last {
|
|
||||||
answer.appendCopy(*ra1, idx1)
|
|
||||||
idx1++
|
|
||||||
if idx1 == length1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func lazyIOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
|
|
||||||
length1 := ra1.size()
|
|
||||||
length2 := ra2.size()
|
|
||||||
|
|
||||||
idx1 := 0
|
|
||||||
idx2 := parNaiveStartAt(ra2, start, last)
|
|
||||||
|
|
||||||
var key1 uint16
|
|
||||||
var key2 uint16
|
|
||||||
if idx1 < length1 && idx2 < length2 {
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
|
|
||||||
for key1 <= last && key2 <= last {
|
|
||||||
if key1 < key2 {
|
|
||||||
idx1++
|
|
||||||
if idx1 >= length1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
} else if key1 > key2 {
|
|
||||||
ra1.insertNewKeyValueAt(idx1, key2, ra2.getContainerAtIndex(idx2))
|
|
||||||
ra1.needCopyOnWrite[idx1] = true
|
|
||||||
idx2++
|
|
||||||
idx1++
|
|
||||||
length1++
|
|
||||||
if idx2 >= length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
} else {
|
|
||||||
c1 := ra1.getFastContainerAtIndex(idx1, true)
|
|
||||||
|
|
||||||
ra1.containers[idx1] = c1.lazyIOR(ra2.getContainerAtIndex(idx2))
|
|
||||||
ra1.needCopyOnWrite[idx1] = false
|
|
||||||
idx1++
|
|
||||||
idx2++
|
|
||||||
if idx1 >= length1 || idx2 >= length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
key1 = ra1.getKeyAtIndex(idx1)
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if idx2 < length2 {
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
for key2 <= last {
|
|
||||||
ra1.appendCopy(*ra2, idx2)
|
|
||||||
idx2++
|
|
||||||
if idx2 >= length2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
key2 = ra2.getKeyAtIndex(idx2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ra1
|
|
||||||
}
|
|
11
vendor/github.com/RoaringBitmap/roaring/popcnt.go
generated
vendored
11
vendor/github.com/RoaringBitmap/roaring/popcnt.go
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
// +build go1.9
|
|
||||||
// "go1.9", from Go version 1.9 onward
|
|
||||||
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import "math/bits"
|
|
||||||
|
|
||||||
func popcount(x uint64) uint64 {
|
|
||||||
return uint64(bits.OnesCount64(x))
|
|
||||||
}
|
|
103
vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s
generated
vendored
103
vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s
generated
vendored
@ -1,103 +0,0 @@
|
|||||||
// +build amd64,!appengine,!go1.9
|
|
||||||
|
|
||||||
TEXT ·hasAsm(SB),4,$0-1
|
|
||||||
MOVQ $1, AX
|
|
||||||
CPUID
|
|
||||||
SHRQ $23, CX
|
|
||||||
ANDQ $1, CX
|
|
||||||
MOVB CX, ret+0(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
|
|
||||||
|
|
||||||
TEXT ·popcntSliceAsm(SB),4,$0-32
|
|
||||||
XORQ AX, AX
|
|
||||||
MOVQ s+0(FP), SI
|
|
||||||
MOVQ s_len+8(FP), CX
|
|
||||||
TESTQ CX, CX
|
|
||||||
JZ popcntSliceEnd
|
|
||||||
popcntSliceLoop:
|
|
||||||
BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
|
|
||||||
ADDQ DX, AX
|
|
||||||
ADDQ $8, SI
|
|
||||||
LOOP popcntSliceLoop
|
|
||||||
popcntSliceEnd:
|
|
||||||
MOVQ AX, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
TEXT ·popcntMaskSliceAsm(SB),4,$0-56
|
|
||||||
XORQ AX, AX
|
|
||||||
MOVQ s+0(FP), SI
|
|
||||||
MOVQ s_len+8(FP), CX
|
|
||||||
TESTQ CX, CX
|
|
||||||
JZ popcntMaskSliceEnd
|
|
||||||
MOVQ m+24(FP), DI
|
|
||||||
popcntMaskSliceLoop:
|
|
||||||
MOVQ (DI), DX
|
|
||||||
NOTQ DX
|
|
||||||
ANDQ (SI), DX
|
|
||||||
POPCNTQ_DX_DX
|
|
||||||
ADDQ DX, AX
|
|
||||||
ADDQ $8, SI
|
|
||||||
ADDQ $8, DI
|
|
||||||
LOOP popcntMaskSliceLoop
|
|
||||||
popcntMaskSliceEnd:
|
|
||||||
MOVQ AX, ret+48(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
TEXT ·popcntAndSliceAsm(SB),4,$0-56
|
|
||||||
XORQ AX, AX
|
|
||||||
MOVQ s+0(FP), SI
|
|
||||||
MOVQ s_len+8(FP), CX
|
|
||||||
TESTQ CX, CX
|
|
||||||
JZ popcntAndSliceEnd
|
|
||||||
MOVQ m+24(FP), DI
|
|
||||||
popcntAndSliceLoop:
|
|
||||||
MOVQ (DI), DX
|
|
||||||
ANDQ (SI), DX
|
|
||||||
POPCNTQ_DX_DX
|
|
||||||
ADDQ DX, AX
|
|
||||||
ADDQ $8, SI
|
|
||||||
ADDQ $8, DI
|
|
||||||
LOOP popcntAndSliceLoop
|
|
||||||
popcntAndSliceEnd:
|
|
||||||
MOVQ AX, ret+48(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
TEXT ·popcntOrSliceAsm(SB),4,$0-56
|
|
||||||
XORQ AX, AX
|
|
||||||
MOVQ s+0(FP), SI
|
|
||||||
MOVQ s_len+8(FP), CX
|
|
||||||
TESTQ CX, CX
|
|
||||||
JZ popcntOrSliceEnd
|
|
||||||
MOVQ m+24(FP), DI
|
|
||||||
popcntOrSliceLoop:
|
|
||||||
MOVQ (DI), DX
|
|
||||||
ORQ (SI), DX
|
|
||||||
POPCNTQ_DX_DX
|
|
||||||
ADDQ DX, AX
|
|
||||||
ADDQ $8, SI
|
|
||||||
ADDQ $8, DI
|
|
||||||
LOOP popcntOrSliceLoop
|
|
||||||
popcntOrSliceEnd:
|
|
||||||
MOVQ AX, ret+48(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
TEXT ·popcntXorSliceAsm(SB),4,$0-56
|
|
||||||
XORQ AX, AX
|
|
||||||
MOVQ s+0(FP), SI
|
|
||||||
MOVQ s_len+8(FP), CX
|
|
||||||
TESTQ CX, CX
|
|
||||||
JZ popcntXorSliceEnd
|
|
||||||
MOVQ m+24(FP), DI
|
|
||||||
popcntXorSliceLoop:
|
|
||||||
MOVQ (DI), DX
|
|
||||||
XORQ (SI), DX
|
|
||||||
POPCNTQ_DX_DX
|
|
||||||
ADDQ DX, AX
|
|
||||||
ADDQ $8, SI
|
|
||||||
ADDQ $8, DI
|
|
||||||
LOOP popcntXorSliceLoop
|
|
||||||
popcntXorSliceEnd:
|
|
||||||
MOVQ AX, ret+48(FP)
|
|
||||||
RET
|
|
67
vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go
generated
vendored
67
vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go
generated
vendored
@ -1,67 +0,0 @@
|
|||||||
// +build amd64,!appengine,!go1.9
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
// *** the following functions are defined in popcnt_amd64.s
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func hasAsm() bool
|
|
||||||
|
|
||||||
// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
|
|
||||||
var useAsm = hasAsm()
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func popcntSliceAsm(s []uint64) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func popcntMaskSliceAsm(s, m []uint64) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func popcntAndSliceAsm(s, m []uint64) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func popcntOrSliceAsm(s, m []uint64) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
|
|
||||||
func popcntXorSliceAsm(s, m []uint64) uint64
|
|
||||||
|
|
||||||
func popcntSlice(s []uint64) uint64 {
|
|
||||||
if useAsm {
|
|
||||||
return popcntSliceAsm(s)
|
|
||||||
}
|
|
||||||
return popcntSliceGo(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
|
||||||
if useAsm {
|
|
||||||
return popcntMaskSliceAsm(s, m)
|
|
||||||
}
|
|
||||||
return popcntMaskSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntAndSlice(s, m []uint64) uint64 {
|
|
||||||
if useAsm {
|
|
||||||
return popcntAndSliceAsm(s, m)
|
|
||||||
}
|
|
||||||
return popcntAndSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntOrSlice(s, m []uint64) uint64 {
|
|
||||||
if useAsm {
|
|
||||||
return popcntOrSliceAsm(s, m)
|
|
||||||
}
|
|
||||||
return popcntOrSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntXorSlice(s, m []uint64) uint64 {
|
|
||||||
if useAsm {
|
|
||||||
return popcntXorSliceAsm(s, m)
|
|
||||||
}
|
|
||||||
return popcntXorSliceGo(s, m)
|
|
||||||
}
|
|
17
vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go
generated
vendored
17
vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go
generated
vendored
@ -1,17 +0,0 @@
|
|||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
// bit population count, take from
|
|
||||||
// https://code.google.com/p/go/issues/detail?id=4988#c11
|
|
||||||
// credit: https://code.google.com/u/arnehormann/
|
|
||||||
// credit: https://play.golang.org/p/U7SogJ7psJ
|
|
||||||
// credit: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
|
|
||||||
func popcount(x uint64) uint64 {
|
|
||||||
x -= (x >> 1) & 0x5555555555555555
|
|
||||||
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
|
|
||||||
x += x >> 4
|
|
||||||
x &= 0x0f0f0f0f0f0f0f0f
|
|
||||||
x *= 0x0101010101010101
|
|
||||||
return x >> 56
|
|
||||||
}
|
|
23
vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go
generated
vendored
23
vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
// +build !amd64 appengine go1.9
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
func popcntSlice(s []uint64) uint64 {
|
|
||||||
return popcntSliceGo(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
|
||||||
return popcntMaskSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntAndSlice(s, m []uint64) uint64 {
|
|
||||||
return popcntAndSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntOrSlice(s, m []uint64) uint64 {
|
|
||||||
return popcntOrSliceGo(s, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntXorSlice(s, m []uint64) uint64 {
|
|
||||||
return popcntXorSliceGo(s, m)
|
|
||||||
}
|
|
41
vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go
generated
vendored
41
vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go
generated
vendored
@ -1,41 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
func popcntSliceGo(s []uint64) uint64 {
|
|
||||||
cnt := uint64(0)
|
|
||||||
for _, x := range s {
|
|
||||||
cnt += popcount(x)
|
|
||||||
}
|
|
||||||
return cnt
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntMaskSliceGo(s, m []uint64) uint64 {
|
|
||||||
cnt := uint64(0)
|
|
||||||
for i := range s {
|
|
||||||
cnt += popcount(s[i] &^ m[i])
|
|
||||||
}
|
|
||||||
return cnt
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntAndSliceGo(s, m []uint64) uint64 {
|
|
||||||
cnt := uint64(0)
|
|
||||||
for i := range s {
|
|
||||||
cnt += popcount(s[i] & m[i])
|
|
||||||
}
|
|
||||||
return cnt
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntOrSliceGo(s, m []uint64) uint64 {
|
|
||||||
cnt := uint64(0)
|
|
||||||
for i := range s {
|
|
||||||
cnt += popcount(s[i] | m[i])
|
|
||||||
}
|
|
||||||
return cnt
|
|
||||||
}
|
|
||||||
|
|
||||||
func popcntXorSliceGo(s, m []uint64) uint64 {
|
|
||||||
cnt := uint64(0)
|
|
||||||
for i := range s {
|
|
||||||
cnt += popcount(s[i] ^ m[i])
|
|
||||||
}
|
|
||||||
return cnt
|
|
||||||
}
|
|
101
vendor/github.com/RoaringBitmap/roaring/priorityqueue.go
generated
vendored
101
vendor/github.com/RoaringBitmap/roaring/priorityqueue.go
generated
vendored
@ -1,101 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import "container/heap"
|
|
||||||
|
|
||||||
/////////////
|
|
||||||
// The priorityQueue is used to keep Bitmaps sorted.
|
|
||||||
////////////
|
|
||||||
|
|
||||||
type item struct {
|
|
||||||
value *Bitmap
|
|
||||||
index int
|
|
||||||
}
|
|
||||||
|
|
||||||
type priorityQueue []*item
|
|
||||||
|
|
||||||
func (pq priorityQueue) Len() int { return len(pq) }
|
|
||||||
|
|
||||||
func (pq priorityQueue) Less(i, j int) bool {
|
|
||||||
return pq[i].value.GetSizeInBytes() < pq[j].value.GetSizeInBytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq priorityQueue) Swap(i, j int) {
|
|
||||||
pq[i], pq[j] = pq[j], pq[i]
|
|
||||||
pq[i].index = i
|
|
||||||
pq[j].index = j
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq *priorityQueue) Push(x interface{}) {
|
|
||||||
n := len(*pq)
|
|
||||||
item := x.(*item)
|
|
||||||
item.index = n
|
|
||||||
*pq = append(*pq, item)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq *priorityQueue) Pop() interface{} {
|
|
||||||
old := *pq
|
|
||||||
n := len(old)
|
|
||||||
item := old[n-1]
|
|
||||||
item.index = -1 // for safety
|
|
||||||
*pq = old[0 : n-1]
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq *priorityQueue) update(item *item, value *Bitmap) {
|
|
||||||
item.value = value
|
|
||||||
heap.Fix(pq, item.index)
|
|
||||||
}
|
|
||||||
|
|
||||||
/////////////
|
|
||||||
// The containerPriorityQueue is used to keep the containers of various Bitmaps sorted.
|
|
||||||
////////////
|
|
||||||
|
|
||||||
type containeritem struct {
|
|
||||||
value *Bitmap
|
|
||||||
keyindex int
|
|
||||||
index int
|
|
||||||
}
|
|
||||||
|
|
||||||
type containerPriorityQueue []*containeritem
|
|
||||||
|
|
||||||
func (pq containerPriorityQueue) Len() int { return len(pq) }
|
|
||||||
|
|
||||||
func (pq containerPriorityQueue) Less(i, j int) bool {
|
|
||||||
k1 := pq[i].value.highlowcontainer.getKeyAtIndex(pq[i].keyindex)
|
|
||||||
k2 := pq[j].value.highlowcontainer.getKeyAtIndex(pq[j].keyindex)
|
|
||||||
if k1 != k2 {
|
|
||||||
return k1 < k2
|
|
||||||
}
|
|
||||||
c1 := pq[i].value.highlowcontainer.getContainerAtIndex(pq[i].keyindex)
|
|
||||||
c2 := pq[j].value.highlowcontainer.getContainerAtIndex(pq[j].keyindex)
|
|
||||||
|
|
||||||
return c1.getCardinality() > c2.getCardinality()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq containerPriorityQueue) Swap(i, j int) {
|
|
||||||
pq[i], pq[j] = pq[j], pq[i]
|
|
||||||
pq[i].index = i
|
|
||||||
pq[j].index = j
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq *containerPriorityQueue) Push(x interface{}) {
|
|
||||||
n := len(*pq)
|
|
||||||
item := x.(*containeritem)
|
|
||||||
item.index = n
|
|
||||||
*pq = append(*pq, item)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pq *containerPriorityQueue) Pop() interface{} {
|
|
||||||
old := *pq
|
|
||||||
n := len(old)
|
|
||||||
item := old[n-1]
|
|
||||||
item.index = -1 // for safety
|
|
||||||
*pq = old[0 : n-1]
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
|
|
||||||
//func (pq *containerPriorityQueue) update(item *containeritem, value *Bitmap, keyindex int) {
|
|
||||||
// item.value = value
|
|
||||||
// item.keyindex = keyindex
|
|
||||||
// heap.Fix(pq, item.index)
|
|
||||||
//}
|
|
1423
vendor/github.com/RoaringBitmap/roaring/roaring.go
generated
vendored
1423
vendor/github.com/RoaringBitmap/roaring/roaring.go
generated
vendored
File diff suppressed because it is too large
Load Diff
891
vendor/github.com/RoaringBitmap/roaring/roaringarray.go
generated
vendored
891
vendor/github.com/RoaringBitmap/roaring/roaringarray.go
generated
vendored
@ -1,891 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
snappy "github.com/glycerine/go-unsnap-stream"
|
|
||||||
"github.com/tinylib/msgp/msgp"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate msgp -unexported
|
|
||||||
|
|
||||||
type container interface {
|
|
||||||
addOffset(uint16) []container
|
|
||||||
|
|
||||||
clone() container
|
|
||||||
and(container) container
|
|
||||||
andCardinality(container) int
|
|
||||||
iand(container) container // i stands for inplace
|
|
||||||
andNot(container) container
|
|
||||||
iandNot(container) container // i stands for inplace
|
|
||||||
getCardinality() int
|
|
||||||
// rank returns the number of integers that are
|
|
||||||
// smaller or equal to x. rank(infinity) would be getCardinality().
|
|
||||||
rank(uint16) int
|
|
||||||
|
|
||||||
iadd(x uint16) bool // inplace, returns true if x was new.
|
|
||||||
iaddReturnMinimized(uint16) container // may change return type to minimize storage.
|
|
||||||
|
|
||||||
//addRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
|
|
||||||
iaddRange(start, endx int) container // i stands for inplace, range is [firstOfRange,endx)
|
|
||||||
|
|
||||||
iremove(x uint16) bool // inplace, returns true if x was present.
|
|
||||||
iremoveReturnMinimized(uint16) container // may change return type to minimize storage.
|
|
||||||
|
|
||||||
not(start, final int) container // range is [firstOfRange,lastOfRange)
|
|
||||||
inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
|
|
||||||
xor(r container) container
|
|
||||||
getShortIterator() shortIterable
|
|
||||||
getReverseIterator() shortIterable
|
|
||||||
getManyIterator() manyIterable
|
|
||||||
contains(i uint16) bool
|
|
||||||
maximum() uint16
|
|
||||||
minimum() uint16
|
|
||||||
|
|
||||||
// equals is now logical equals; it does not require the
|
|
||||||
// same underlying container types, but compares across
|
|
||||||
// any of the implementations.
|
|
||||||
equals(r container) bool
|
|
||||||
|
|
||||||
fillLeastSignificant16bits(array []uint32, i int, mask uint32)
|
|
||||||
or(r container) container
|
|
||||||
orCardinality(r container) int
|
|
||||||
isFull() bool
|
|
||||||
ior(r container) container // i stands for inplace
|
|
||||||
intersects(r container) bool // whether the two containers intersect
|
|
||||||
lazyOR(r container) container
|
|
||||||
lazyIOR(r container) container
|
|
||||||
getSizeInBytes() int
|
|
||||||
//removeRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
|
|
||||||
iremoveRange(start, final int) container // i stands for inplace, range is [firstOfRange,lastOfRange)
|
|
||||||
selectInt(x uint16) int // selectInt returns the xth integer in the container
|
|
||||||
serializedSizeInBytes() int
|
|
||||||
readFrom(io.Reader) (int, error)
|
|
||||||
writeTo(io.Writer) (int, error)
|
|
||||||
|
|
||||||
numberOfRuns() int
|
|
||||||
toEfficientContainer() container
|
|
||||||
String() string
|
|
||||||
containerType() contype
|
|
||||||
}
|
|
||||||
|
|
||||||
type contype uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
bitmapContype contype = iota
|
|
||||||
arrayContype
|
|
||||||
run16Contype
|
|
||||||
run32Contype
|
|
||||||
)
|
|
||||||
|
|
||||||
// careful: range is [firstOfRange,lastOfRange]
|
|
||||||
func rangeOfOnes(start, last int) container {
|
|
||||||
if start > MaxUint16 {
|
|
||||||
panic("rangeOfOnes called with start > MaxUint16")
|
|
||||||
}
|
|
||||||
if last > MaxUint16 {
|
|
||||||
panic("rangeOfOnes called with last > MaxUint16")
|
|
||||||
}
|
|
||||||
if start < 0 {
|
|
||||||
panic("rangeOfOnes called with start < 0")
|
|
||||||
}
|
|
||||||
if last < 0 {
|
|
||||||
panic("rangeOfOnes called with last < 0")
|
|
||||||
}
|
|
||||||
return newRunContainer16Range(uint16(start), uint16(last))
|
|
||||||
}
|
|
||||||
|
|
||||||
type roaringArray struct {
|
|
||||||
keys []uint16
|
|
||||||
containers []container `msg:"-"` // don't try to serialize directly.
|
|
||||||
needCopyOnWrite []bool
|
|
||||||
copyOnWrite bool
|
|
||||||
|
|
||||||
// conserz is used at serialization time
|
|
||||||
// to serialize containers. Otherwise empty.
|
|
||||||
conserz []containerSerz
|
|
||||||
}
|
|
||||||
|
|
||||||
// containerSerz facilitates serializing container (tricky to
|
|
||||||
// serialize because it is an interface) by providing a
|
|
||||||
// light wrapper with a type identifier.
|
|
||||||
type containerSerz struct {
|
|
||||||
t contype `msg:"t"` // type
|
|
||||||
r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRoaringArray() *roaringArray {
|
|
||||||
return &roaringArray{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// runOptimize compresses the element containers to minimize space consumed.
|
|
||||||
// Q: how does this interact with copyOnWrite and needCopyOnWrite?
|
|
||||||
// A: since we aren't changing the logical content, just the representation,
|
|
||||||
// we don't bother to check the needCopyOnWrite bits. We replace
|
|
||||||
// (possibly all) elements of ra.containers in-place with space
|
|
||||||
// optimized versions.
|
|
||||||
func (ra *roaringArray) runOptimize() {
|
|
||||||
for i := range ra.containers {
|
|
||||||
ra.containers[i] = ra.containers[i].toEfficientContainer()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendContainer(key uint16, value container, mustCopyOnWrite bool) {
|
|
||||||
ra.keys = append(ra.keys, key)
|
|
||||||
ra.containers = append(ra.containers, value)
|
|
||||||
ra.needCopyOnWrite = append(ra.needCopyOnWrite, mustCopyOnWrite)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendWithoutCopy(sa roaringArray, startingindex int) {
|
|
||||||
mustCopyOnWrite := sa.needCopyOnWrite[startingindex]
|
|
||||||
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], mustCopyOnWrite)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendCopy(sa roaringArray, startingindex int) {
|
|
||||||
// cow only if the two request it, or if we already have a lightweight copy
|
|
||||||
copyonwrite := (ra.copyOnWrite && sa.copyOnWrite) || sa.needsCopyOnWrite(startingindex)
|
|
||||||
if !copyonwrite {
|
|
||||||
// since there is no copy-on-write, we need to clone the container (this is important)
|
|
||||||
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex].clone(), copyonwrite)
|
|
||||||
} else {
|
|
||||||
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], copyonwrite)
|
|
||||||
if !sa.needsCopyOnWrite(startingindex) {
|
|
||||||
sa.setNeedsCopyOnWrite(startingindex)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendWithoutCopyMany(sa roaringArray, startingindex, end int) {
|
|
||||||
for i := startingindex; i < end; i++ {
|
|
||||||
ra.appendWithoutCopy(sa, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendCopyMany(sa roaringArray, startingindex, end int) {
|
|
||||||
for i := startingindex; i < end; i++ {
|
|
||||||
ra.appendCopy(sa, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendCopiesUntil(sa roaringArray, stoppingKey uint16) {
|
|
||||||
// cow only if the two request it, or if we already have a lightweight copy
|
|
||||||
copyonwrite := ra.copyOnWrite && sa.copyOnWrite
|
|
||||||
|
|
||||||
for i := 0; i < sa.size(); i++ {
|
|
||||||
if sa.keys[i] >= stoppingKey {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
|
|
||||||
if thiscopyonewrite {
|
|
||||||
ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
|
|
||||||
if !sa.needsCopyOnWrite(i) {
|
|
||||||
sa.setNeedsCopyOnWrite(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// since there is no copy-on-write, we need to clone the container (this is important)
|
|
||||||
ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) appendCopiesAfter(sa roaringArray, beforeStart uint16) {
|
|
||||||
// cow only if the two request it, or if we already have a lightweight copy
|
|
||||||
copyonwrite := ra.copyOnWrite && sa.copyOnWrite
|
|
||||||
|
|
||||||
startLocation := sa.getIndex(beforeStart)
|
|
||||||
if startLocation >= 0 {
|
|
||||||
startLocation++
|
|
||||||
} else {
|
|
||||||
startLocation = -startLocation - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := startLocation; i < sa.size(); i++ {
|
|
||||||
thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
|
|
||||||
if thiscopyonewrite {
|
|
||||||
ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
|
|
||||||
if !sa.needsCopyOnWrite(i) {
|
|
||||||
sa.setNeedsCopyOnWrite(i)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// since there is no copy-on-write, we need to clone the container (this is important)
|
|
||||||
ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) removeIndexRange(begin, end int) {
|
|
||||||
if end <= begin {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
r := end - begin
|
|
||||||
|
|
||||||
copy(ra.keys[begin:], ra.keys[end:])
|
|
||||||
copy(ra.containers[begin:], ra.containers[end:])
|
|
||||||
copy(ra.needCopyOnWrite[begin:], ra.needCopyOnWrite[end:])
|
|
||||||
|
|
||||||
ra.resize(len(ra.keys) - r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) resize(newsize int) {
|
|
||||||
for k := newsize; k < len(ra.containers); k++ {
|
|
||||||
ra.containers[k] = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ra.keys = ra.keys[:newsize]
|
|
||||||
ra.containers = ra.containers[:newsize]
|
|
||||||
ra.needCopyOnWrite = ra.needCopyOnWrite[:newsize]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) clear() {
|
|
||||||
ra.resize(0)
|
|
||||||
ra.copyOnWrite = false
|
|
||||||
ra.conserz = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) clone() *roaringArray {
|
|
||||||
|
|
||||||
sa := roaringArray{}
|
|
||||||
sa.copyOnWrite = ra.copyOnWrite
|
|
||||||
|
|
||||||
// this is where copyOnWrite is used.
|
|
||||||
if ra.copyOnWrite {
|
|
||||||
sa.keys = make([]uint16, len(ra.keys))
|
|
||||||
copy(sa.keys, ra.keys)
|
|
||||||
sa.containers = make([]container, len(ra.containers))
|
|
||||||
copy(sa.containers, ra.containers)
|
|
||||||
sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
|
|
||||||
|
|
||||||
ra.markAllAsNeedingCopyOnWrite()
|
|
||||||
sa.markAllAsNeedingCopyOnWrite()
|
|
||||||
|
|
||||||
// sa.needCopyOnWrite is shared
|
|
||||||
} else {
|
|
||||||
// make a full copy
|
|
||||||
|
|
||||||
sa.keys = make([]uint16, len(ra.keys))
|
|
||||||
copy(sa.keys, ra.keys)
|
|
||||||
|
|
||||||
sa.containers = make([]container, len(ra.containers))
|
|
||||||
for i := range sa.containers {
|
|
||||||
sa.containers[i] = ra.containers[i].clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
|
|
||||||
}
|
|
||||||
return &sa
|
|
||||||
}
|
|
||||||
|
|
||||||
// unused function:
|
|
||||||
//func (ra *roaringArray) containsKey(x uint16) bool {
|
|
||||||
// return (ra.binarySearch(0, int64(len(ra.keys)), x) >= 0)
|
|
||||||
//}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getContainer(x uint16) container {
|
|
||||||
i := ra.binarySearch(0, int64(len(ra.keys)), x)
|
|
||||||
if i < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return ra.containers[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getContainerAtIndex(i int) container {
|
|
||||||
return ra.containers[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) container {
|
|
||||||
c := ra.getContainerAtIndex(i)
|
|
||||||
switch t := c.(type) {
|
|
||||||
case *arrayContainer:
|
|
||||||
c = t.toBitmapContainer()
|
|
||||||
case *runContainer16:
|
|
||||||
if !t.isFull() {
|
|
||||||
c = t.toBitmapContainer()
|
|
||||||
}
|
|
||||||
case *bitmapContainer:
|
|
||||||
if needsWriteable && ra.needCopyOnWrite[i] {
|
|
||||||
c = ra.containers[i].clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getWritableContainerAtIndex(i int) container {
|
|
||||||
if ra.needCopyOnWrite[i] {
|
|
||||||
ra.containers[i] = ra.containers[i].clone()
|
|
||||||
ra.needCopyOnWrite[i] = false
|
|
||||||
}
|
|
||||||
return ra.containers[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getIndex(x uint16) int {
|
|
||||||
// before the binary search, we optimize for frequent cases
|
|
||||||
size := len(ra.keys)
|
|
||||||
if (size == 0) || (ra.keys[size-1] == x) {
|
|
||||||
return size - 1
|
|
||||||
}
|
|
||||||
return ra.binarySearch(0, int64(size), x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) getKeyAtIndex(i int) uint16 {
|
|
||||||
return ra.keys[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) insertNewKeyValueAt(i int, key uint16, value container) {
|
|
||||||
ra.keys = append(ra.keys, 0)
|
|
||||||
ra.containers = append(ra.containers, nil)
|
|
||||||
|
|
||||||
copy(ra.keys[i+1:], ra.keys[i:])
|
|
||||||
copy(ra.containers[i+1:], ra.containers[i:])
|
|
||||||
|
|
||||||
ra.keys[i] = key
|
|
||||||
ra.containers[i] = value
|
|
||||||
|
|
||||||
ra.needCopyOnWrite = append(ra.needCopyOnWrite, false)
|
|
||||||
copy(ra.needCopyOnWrite[i+1:], ra.needCopyOnWrite[i:])
|
|
||||||
ra.needCopyOnWrite[i] = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) remove(key uint16) bool {
|
|
||||||
i := ra.binarySearch(0, int64(len(ra.keys)), key)
|
|
||||||
if i >= 0 { // if a new key
|
|
||||||
ra.removeAtIndex(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) removeAtIndex(i int) {
|
|
||||||
copy(ra.keys[i:], ra.keys[i+1:])
|
|
||||||
copy(ra.containers[i:], ra.containers[i+1:])
|
|
||||||
|
|
||||||
copy(ra.needCopyOnWrite[i:], ra.needCopyOnWrite[i+1:])
|
|
||||||
|
|
||||||
ra.resize(len(ra.keys) - 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) setContainerAtIndex(i int, c container) {
|
|
||||||
ra.containers[i] = c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) replaceKeyAndContainerAtIndex(i int, key uint16, c container, mustCopyOnWrite bool) {
|
|
||||||
ra.keys[i] = key
|
|
||||||
ra.containers[i] = c
|
|
||||||
ra.needCopyOnWrite[i] = mustCopyOnWrite
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) size() int {
|
|
||||||
return len(ra.keys)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) binarySearch(begin, end int64, ikey uint16) int {
|
|
||||||
low := begin
|
|
||||||
high := end - 1
|
|
||||||
for low+16 <= high {
|
|
||||||
middleIndex := low + (high-low)/2 // avoid overflow
|
|
||||||
middleValue := ra.keys[middleIndex]
|
|
||||||
|
|
||||||
if middleValue < ikey {
|
|
||||||
low = middleIndex + 1
|
|
||||||
} else if middleValue > ikey {
|
|
||||||
high = middleIndex - 1
|
|
||||||
} else {
|
|
||||||
return int(middleIndex)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for ; low <= high; low++ {
|
|
||||||
val := ra.keys[low]
|
|
||||||
if val >= ikey {
|
|
||||||
if val == ikey {
|
|
||||||
return int(low)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -int(low + 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) equals(o interface{}) bool {
|
|
||||||
srb, ok := o.(roaringArray)
|
|
||||||
if ok {
|
|
||||||
|
|
||||||
if srb.size() != ra.size() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, k := range ra.keys {
|
|
||||||
if k != srb.keys[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, c := range ra.containers {
|
|
||||||
if !c.equals(srb.containers[i]) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) headerSize() uint64 {
|
|
||||||
size := uint64(len(ra.keys))
|
|
||||||
if ra.hasRunCompression() {
|
|
||||||
if size < noOffsetThreshold { // for small bitmaps, we omit the offsets
|
|
||||||
return 4 + (size+7)/8 + 4*size
|
|
||||||
}
|
|
||||||
return 4 + (size+7)/8 + 8*size // - 4 because we pack the size with the cookie
|
|
||||||
}
|
|
||||||
return 4 + 4 + 8*size
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// should be dirt cheap
|
|
||||||
func (ra *roaringArray) serializedSizeInBytes() uint64 {
|
|
||||||
answer := ra.headerSize()
|
|
||||||
for _, c := range ra.containers {
|
|
||||||
answer += uint64(c.serializedSizeInBytes())
|
|
||||||
}
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
|
|
||||||
//
|
|
||||||
func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) {
|
|
||||||
hasRun := ra.hasRunCompression()
|
|
||||||
isRunSizeInBytes := 0
|
|
||||||
cookieSize := 8
|
|
||||||
if hasRun {
|
|
||||||
cookieSize = 4
|
|
||||||
isRunSizeInBytes = (len(ra.keys) + 7) / 8
|
|
||||||
}
|
|
||||||
descriptiveHeaderSize := 4 * len(ra.keys)
|
|
||||||
preambleSize := cookieSize + isRunSizeInBytes + descriptiveHeaderSize
|
|
||||||
|
|
||||||
buf := make([]byte, preambleSize+4*len(ra.keys))
|
|
||||||
|
|
||||||
nw := 0
|
|
||||||
|
|
||||||
if hasRun {
|
|
||||||
binary.LittleEndian.PutUint16(buf[0:], uint16(serialCookie))
|
|
||||||
nw += 2
|
|
||||||
binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1))
|
|
||||||
nw += 2
|
|
||||||
|
|
||||||
// compute isRun bitmap
|
|
||||||
var ir []byte
|
|
||||||
|
|
||||||
isRun := newBitmapContainer()
|
|
||||||
for i, c := range ra.containers {
|
|
||||||
switch c.(type) {
|
|
||||||
case *runContainer16:
|
|
||||||
isRun.iadd(uint16(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// convert to little endian
|
|
||||||
ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes]
|
|
||||||
nw += copy(buf[nw:], ir)
|
|
||||||
} else {
|
|
||||||
binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer))
|
|
||||||
nw += 4
|
|
||||||
binary.LittleEndian.PutUint32(buf[4:], uint32(len(ra.keys)))
|
|
||||||
nw += 4
|
|
||||||
}
|
|
||||||
|
|
||||||
// descriptive header
|
|
||||||
for i, key := range ra.keys {
|
|
||||||
binary.LittleEndian.PutUint16(buf[nw:], key)
|
|
||||||
nw += 2
|
|
||||||
c := ra.containers[i]
|
|
||||||
binary.LittleEndian.PutUint16(buf[nw:], uint16(c.getCardinality()-1))
|
|
||||||
nw += 2
|
|
||||||
}
|
|
||||||
|
|
||||||
startOffset := int64(preambleSize + 4*len(ra.keys))
|
|
||||||
if !hasRun || (len(ra.keys) >= noOffsetThreshold) {
|
|
||||||
// offset header
|
|
||||||
for _, c := range ra.containers {
|
|
||||||
binary.LittleEndian.PutUint32(buf[nw:], uint32(startOffset))
|
|
||||||
nw += 4
|
|
||||||
switch rc := c.(type) {
|
|
||||||
case *runContainer16:
|
|
||||||
startOffset += 2 + int64(len(rc.iv))*4
|
|
||||||
default:
|
|
||||||
startOffset += int64(getSizeInBytesFromCardinality(c.getCardinality()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
written, err := w.Write(buf[:nw])
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n += int64(written)
|
|
||||||
|
|
||||||
for _, c := range ra.containers {
|
|
||||||
written, err := c.writeTo(w)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n += int64(written)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
|
|
||||||
//
|
|
||||||
func (ra *roaringArray) toBytes() ([]byte, error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
_, err := ra.writeTo(&buf)
|
|
||||||
return buf.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) fromBuffer(buf []byte) (int64, error) {
|
|
||||||
pos := 0
|
|
||||||
if len(buf) < 8 {
|
|
||||||
return 0, fmt.Errorf("buffer too small, expecting at least 8 bytes, was %d", len(buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
cookie := binary.LittleEndian.Uint32(buf)
|
|
||||||
pos += 4
|
|
||||||
var size uint32 // number of containers
|
|
||||||
haveRunContainers := false
|
|
||||||
var isRunBitmap []byte
|
|
||||||
|
|
||||||
// cookie header
|
|
||||||
if cookie&0x0000FFFF == serialCookie {
|
|
||||||
haveRunContainers = true
|
|
||||||
size = uint32(uint16(cookie>>16) + 1) // number of containers
|
|
||||||
|
|
||||||
// create is-run-container bitmap
|
|
||||||
isRunBitmapSize := (int(size) + 7) / 8
|
|
||||||
if pos+isRunBitmapSize > len(buf) {
|
|
||||||
return 0, fmt.Errorf("malformed bitmap, is-run bitmap overruns buffer at %d", pos+isRunBitmapSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
isRunBitmap = buf[pos : pos+isRunBitmapSize]
|
|
||||||
pos += isRunBitmapSize
|
|
||||||
} else if cookie == serialCookieNoRunContainer {
|
|
||||||
size = binary.LittleEndian.Uint32(buf[pos:])
|
|
||||||
pos += 4
|
|
||||||
} else {
|
|
||||||
return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
|
|
||||||
}
|
|
||||||
if size > (1 << 16) {
|
|
||||||
return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
|
|
||||||
}
|
|
||||||
// descriptive header
|
|
||||||
// keycard - is {key, cardinality} tuple slice
|
|
||||||
if pos+2*2*int(size) > len(buf) {
|
|
||||||
return 0, fmt.Errorf("malfomred bitmap, key-cardinality slice overruns buffer at %d", pos+2*2*int(size))
|
|
||||||
}
|
|
||||||
keycard := byteSliceAsUint16Slice(buf[pos : pos+2*2*int(size)])
|
|
||||||
pos += 2 * 2 * int(size)
|
|
||||||
|
|
||||||
if !haveRunContainers || size >= noOffsetThreshold {
|
|
||||||
pos += 4 * int(size)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allocate slices upfront as number of containers is known
|
|
||||||
if cap(ra.containers) >= int(size) {
|
|
||||||
ra.containers = ra.containers[:size]
|
|
||||||
} else {
|
|
||||||
ra.containers = make([]container, size)
|
|
||||||
}
|
|
||||||
if cap(ra.keys) >= int(size) {
|
|
||||||
ra.keys = ra.keys[:size]
|
|
||||||
} else {
|
|
||||||
ra.keys = make([]uint16, size)
|
|
||||||
}
|
|
||||||
if cap(ra.needCopyOnWrite) >= int(size) {
|
|
||||||
ra.needCopyOnWrite = ra.needCopyOnWrite[:size]
|
|
||||||
} else {
|
|
||||||
ra.needCopyOnWrite = make([]bool, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := uint32(0); i < size; i++ {
|
|
||||||
key := uint16(keycard[2*i])
|
|
||||||
card := int(keycard[2*i+1]) + 1
|
|
||||||
ra.keys[i] = key
|
|
||||||
ra.needCopyOnWrite[i] = true
|
|
||||||
|
|
||||||
if haveRunContainers && isRunBitmap[i/8]&(1<<(i%8)) != 0 {
|
|
||||||
// run container
|
|
||||||
nr := binary.LittleEndian.Uint16(buf[pos:])
|
|
||||||
pos += 2
|
|
||||||
if pos+int(nr)*4 > len(buf) {
|
|
||||||
return 0, fmt.Errorf("malformed bitmap, a run container overruns buffer at %d:%d", pos, pos+int(nr)*4)
|
|
||||||
}
|
|
||||||
nb := runContainer16{
|
|
||||||
iv: byteSliceAsInterval16Slice(buf[pos : pos+int(nr)*4]),
|
|
||||||
card: int64(card),
|
|
||||||
}
|
|
||||||
pos += int(nr) * 4
|
|
||||||
ra.containers[i] = &nb
|
|
||||||
} else if card > arrayDefaultMaxSize {
|
|
||||||
// bitmap container
|
|
||||||
nb := bitmapContainer{
|
|
||||||
cardinality: card,
|
|
||||||
bitmap: byteSliceAsUint64Slice(buf[pos : pos+arrayDefaultMaxSize*2]),
|
|
||||||
}
|
|
||||||
pos += arrayDefaultMaxSize * 2
|
|
||||||
ra.containers[i] = &nb
|
|
||||||
} else {
|
|
||||||
// array container
|
|
||||||
nb := arrayContainer{
|
|
||||||
byteSliceAsUint16Slice(buf[pos : pos+card*2]),
|
|
||||||
}
|
|
||||||
pos += card * 2
|
|
||||||
ra.containers[i] = &nb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return int64(pos), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) readFrom(stream io.Reader) (int64, error) {
|
|
||||||
pos := 0
|
|
||||||
var cookie uint32
|
|
||||||
err := binary.Read(stream, binary.LittleEndian, &cookie)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err)
|
|
||||||
}
|
|
||||||
pos += 4
|
|
||||||
var size uint32
|
|
||||||
haveRunContainers := false
|
|
||||||
var isRun *bitmapContainer
|
|
||||||
if cookie&0x0000FFFF == serialCookie {
|
|
||||||
haveRunContainers = true
|
|
||||||
size = uint32(uint16(cookie>>16) + 1)
|
|
||||||
bytesToRead := (int(size) + 7) / 8
|
|
||||||
numwords := (bytesToRead + 7) / 8
|
|
||||||
by := make([]byte, bytesToRead, numwords*8)
|
|
||||||
nr, err := io.ReadFull(stream, by)
|
|
||||||
if err != nil {
|
|
||||||
return 8 + int64(nr), fmt.Errorf("error in readFrom: could not read the "+
|
|
||||||
"runContainer bit flags of length %v bytes: %v", bytesToRead, err)
|
|
||||||
}
|
|
||||||
pos += bytesToRead
|
|
||||||
by = by[:cap(by)]
|
|
||||||
isRun = newBitmapContainer()
|
|
||||||
for i := 0; i < numwords; i++ {
|
|
||||||
isRun.bitmap[i] = binary.LittleEndian.Uint64(by)
|
|
||||||
by = by[8:]
|
|
||||||
}
|
|
||||||
} else if cookie == serialCookieNoRunContainer {
|
|
||||||
err = binary.Read(stream, binary.LittleEndian, &size)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("error in roaringArray.readFrom: when reading size, got: %s", err)
|
|
||||||
}
|
|
||||||
pos += 4
|
|
||||||
} else {
|
|
||||||
return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
|
|
||||||
}
|
|
||||||
if size > (1 << 16) {
|
|
||||||
return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
|
|
||||||
}
|
|
||||||
// descriptive header
|
|
||||||
keycard := make([]uint16, 2*size, 2*size)
|
|
||||||
err = binary.Read(stream, binary.LittleEndian, keycard)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pos += 2 * 2 * int(size)
|
|
||||||
// offset header
|
|
||||||
if !haveRunContainers || size >= noOffsetThreshold {
|
|
||||||
io.CopyN(ioutil.Discard, stream, 4*int64(size)) // we never skip ahead so this data can be ignored
|
|
||||||
pos += 4 * int(size)
|
|
||||||
}
|
|
||||||
for i := uint32(0); i < size; i++ {
|
|
||||||
key := int(keycard[2*i])
|
|
||||||
card := int(keycard[2*i+1]) + 1
|
|
||||||
if haveRunContainers && isRun.contains(uint16(i)) {
|
|
||||||
nb := newRunContainer16()
|
|
||||||
nr, err := nb.readFrom(stream)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pos += nr
|
|
||||||
ra.appendContainer(uint16(key), nb, false)
|
|
||||||
} else if card > arrayDefaultMaxSize {
|
|
||||||
nb := newBitmapContainer()
|
|
||||||
nr, err := nb.readFrom(stream)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
nb.cardinality = card
|
|
||||||
pos += nr
|
|
||||||
ra.appendContainer(keycard[2*i], nb, false)
|
|
||||||
} else {
|
|
||||||
nb := newArrayContainerSize(card)
|
|
||||||
nr, err := nb.readFrom(stream)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
pos += nr
|
|
||||||
ra.appendContainer(keycard[2*i], nb, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return int64(pos), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) hasRunCompression() bool {
|
|
||||||
for _, c := range ra.containers {
|
|
||||||
switch c.(type) {
|
|
||||||
case *runContainer16:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) writeToMsgpack(stream io.Writer) error {
|
|
||||||
|
|
||||||
ra.conserz = make([]containerSerz, len(ra.containers))
|
|
||||||
for i, v := range ra.containers {
|
|
||||||
switch cn := v.(type) {
|
|
||||||
case *bitmapContainer:
|
|
||||||
bts, err := cn.MarshalMsg(nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ra.conserz[i].t = bitmapContype
|
|
||||||
ra.conserz[i].r = bts
|
|
||||||
case *arrayContainer:
|
|
||||||
bts, err := cn.MarshalMsg(nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ra.conserz[i].t = arrayContype
|
|
||||||
ra.conserz[i].r = bts
|
|
||||||
case *runContainer16:
|
|
||||||
bts, err := cn.MarshalMsg(nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ra.conserz[i].t = run16Contype
|
|
||||||
ra.conserz[i].r = bts
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("Unrecognized container implementation: %T", cn))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w := snappy.NewWriter(stream)
|
|
||||||
err := msgp.Encode(w, ra)
|
|
||||||
ra.conserz = nil
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// readFromMsgpack deserializes ra from a snappy-compressed msgpack
// stream produced by writeToMsgpack, then rebuilds the concrete
// container values from the type-tagged raw blobs in ra.conserz.
func (ra *roaringArray) readFromMsgpack(stream io.Reader) error {
	r := snappy.NewReader(stream)
	err := msgp.Decode(r, ra)
	if err != nil {
		return err
	}

	// msgp restored keys/conserz but not containers; resize to match
	if len(ra.containers) != len(ra.keys) {
		ra.containers = make([]container, len(ra.keys))
	}

	// reconstruct each container from its tagged raw payload
	for i, v := range ra.conserz {
		switch v.t {
		case bitmapContype:
			c := &bitmapContainer{}
			_, err = c.UnmarshalMsg(v.r)
			if err != nil {
				return err
			}
			ra.containers[i] = c
		case arrayContype:
			c := &arrayContainer{}
			_, err = c.UnmarshalMsg(v.r)
			if err != nil {
				return err
			}
			ra.containers[i] = c
		case run16Contype:
			c := &runContainer16{}
			_, err = c.UnmarshalMsg(v.r)
			if err != nil {
				return err
			}
			ra.containers[i] = c
		default:
			return fmt.Errorf("unrecognized contype serialization code: '%v'", v.t)
		}
	}
	// drop the scratch serialization buffers
	ra.conserz = nil
	return nil
}
|
|
||||||
|
|
||||||
// advanceUntil returns the index of the first key at a position
// strictly greater than pos whose value is >= min, using exponential
// ("galloping") search followed by binary search. If no such key
// exists, it returns len(ra.keys).
func (ra *roaringArray) advanceUntil(min uint16, pos int) int {
	lower := pos + 1

	// fast path: already past the end, or the very next key qualifies
	if lower >= len(ra.keys) || ra.keys[lower] >= min {
		return lower
	}

	spansize := 1

	// gallop: double the span until it reaches or overshoots min
	for lower+spansize < len(ra.keys) && ra.keys[lower+spansize] < min {
		spansize *= 2
	}
	// clamp the upper probe to the last valid index
	var upper int
	if lower+spansize < len(ra.keys) {
		upper = lower + spansize
	} else {
		upper = len(ra.keys) - 1
	}

	if ra.keys[upper] == min {
		return upper
	}

	if ra.keys[upper] < min {
		// means
		// array
		// has no
		// item
		// >= min
		// pos = array.length;
		return len(ra.keys)
	}

	// we know that the next-smallest span was too small
	lower += (spansize >> 1)

	// binary search in the open interval (lower, upper)
	mid := 0
	for lower+1 != upper {
		mid = (lower + upper) >> 1
		if ra.keys[mid] == min {
			return mid
		} else if ra.keys[mid] < min {
			lower = mid
		} else {
			upper = mid
		}
	}
	return upper
}
|
|
||||||
|
|
||||||
func (ra *roaringArray) markAllAsNeedingCopyOnWrite() {
|
|
||||||
for i := range ra.needCopyOnWrite {
|
|
||||||
ra.needCopyOnWrite[i] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// needsCopyOnWrite reports whether the container at index i must be
// cloned before it may be mutated.
func (ra *roaringArray) needsCopyOnWrite(i int) bool {
	return ra.needCopyOnWrite[i]
}
|
|
||||||
|
|
||||||
// setNeedsCopyOnWrite flags the container at index i so that it is
// cloned before any future mutation.
func (ra *roaringArray) setNeedsCopyOnWrite(i int) {
	ra.needCopyOnWrite[i] = true
}
|
|
529
vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go
generated
vendored
529
vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go
generated
vendored
@ -1,529 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
// NOTE: THIS FILE WAS PRODUCED BY THE
|
|
||||||
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
|
|
||||||
// DO NOT EDIT
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/tinylib/msgp/msgp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
|
|
||||||
func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) {
|
|
||||||
var field []byte
|
|
||||||
_ = field
|
|
||||||
var zxvk uint32
|
|
||||||
zxvk, err = dc.ReadMapHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zxvk > 0 {
|
|
||||||
zxvk--
|
|
||||||
field, err = dc.ReadMapKeyPtr()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "t":
|
|
||||||
{
|
|
||||||
var zbzg uint8
|
|
||||||
zbzg, err = dc.ReadUint8()
|
|
||||||
z.t = contype(zbzg)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "r":
|
|
||||||
err = z.r.DecodeMsg(dc)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = dc.Skip()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
//
// Encodes z as the msgpack map {"t": uint8 type tag, "r": raw bytes}.
// NOTE: machine-generated by msgp; regenerate rather than hand-edit.
func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "t"
	err = en.Append(0x82, 0xa1, 0x74)
	if err != nil {
		return err
	}
	err = en.WriteUint8(uint8(z.t))
	if err != nil {
		return
	}
	// write "r"
	err = en.Append(0xa1, 0x72)
	if err != nil {
		return err
	}
	err = z.r.EncodeMsg(en)
	if err != nil {
		return
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
//
// Appends z to b as the msgpack map {"t": uint8 type tag, "r": raw bytes}.
// NOTE: machine-generated by msgp; regenerate rather than hand-edit.
func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "t"
	o = append(o, 0x82, 0xa1, 0x74)
	o = msgp.AppendUint8(o, uint8(z.t))
	// string "r"
	o = append(o, 0xa1, 0x72)
	o, err = z.r.MarshalMsg(o)
	if err != nil {
		return
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
|
|
||||||
func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|
||||||
var field []byte
|
|
||||||
_ = field
|
|
||||||
var zbai uint32
|
|
||||||
zbai, bts, err = msgp.ReadMapHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zbai > 0 {
|
|
||||||
zbai--
|
|
||||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "t":
|
|
||||||
{
|
|
||||||
var zcmr uint8
|
|
||||||
zcmr, bts, err = msgp.ReadUint8Bytes(bts)
|
|
||||||
z.t = contype(zcmr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "r":
|
|
||||||
bts, err = z.r.UnmarshalMsg(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
bts, err = msgp.Skip(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o = bts
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
//
// 1-byte map header + ("t" key + uint8) + ("r" key + raw payload).
func (z *containerSerz) Msgsize() (s int) {
	s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize()
	return
}
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
//
// A contype travels on the wire as a single uint8.
func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) {
	{
		var zajw uint8
		zajw, err = dc.ReadUint8()
		(*z) = contype(zajw)
	}
	if err != nil {
		return
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
//
// Writes the contype as a single uint8.
func (z contype) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteUint8(uint8(z))
	if err != nil {
		return
	}
	return
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
//
// Appends the contype to b as a single uint8.
func (z contype) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendUint8(o, uint8(z))
	return
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
//
// Reads a single uint8 from bts and returns the unconsumed remainder.
func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) {
	{
		var zwht uint8
		zwht, bts, err = msgp.ReadUint8Bytes(bts)
		(*z) = contype(zwht)
	}
	if err != nil {
		return
	}
	o = bts
	return
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
//
// A contype is serialized as exactly one uint8.
func (z contype) Msgsize() (s int) {
	s = msgp.Uint8Size
	return
}
|
|
||||||
|
|
||||||
// Deprecated: DecodeMsg implements msgp.Decodable
|
|
||||||
func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) {
|
|
||||||
var field []byte
|
|
||||||
_ = field
|
|
||||||
var zlqf uint32
|
|
||||||
zlqf, err = dc.ReadMapHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zlqf > 0 {
|
|
||||||
zlqf--
|
|
||||||
field, err = dc.ReadMapKeyPtr()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "keys":
|
|
||||||
var zdaf uint32
|
|
||||||
zdaf, err = dc.ReadArrayHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.keys) >= int(zdaf) {
|
|
||||||
z.keys = (z.keys)[:zdaf]
|
|
||||||
} else {
|
|
||||||
z.keys = make([]uint16, zdaf)
|
|
||||||
}
|
|
||||||
for zhct := range z.keys {
|
|
||||||
z.keys[zhct], err = dc.ReadUint16()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "needCopyOnWrite":
|
|
||||||
var zpks uint32
|
|
||||||
zpks, err = dc.ReadArrayHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.needCopyOnWrite) >= int(zpks) {
|
|
||||||
z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks]
|
|
||||||
} else {
|
|
||||||
z.needCopyOnWrite = make([]bool, zpks)
|
|
||||||
}
|
|
||||||
for zcua := range z.needCopyOnWrite {
|
|
||||||
z.needCopyOnWrite[zcua], err = dc.ReadBool()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "copyOnWrite":
|
|
||||||
z.copyOnWrite, err = dc.ReadBool()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "conserz":
|
|
||||||
var zjfb uint32
|
|
||||||
zjfb, err = dc.ReadArrayHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.conserz) >= int(zjfb) {
|
|
||||||
z.conserz = (z.conserz)[:zjfb]
|
|
||||||
} else {
|
|
||||||
z.conserz = make([]containerSerz, zjfb)
|
|
||||||
}
|
|
||||||
for zxhx := range z.conserz {
|
|
||||||
var zcxo uint32
|
|
||||||
zcxo, err = dc.ReadMapHeader()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zcxo > 0 {
|
|
||||||
zcxo--
|
|
||||||
field, err = dc.ReadMapKeyPtr()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "t":
|
|
||||||
{
|
|
||||||
var zeff uint8
|
|
||||||
zeff, err = dc.ReadUint8()
|
|
||||||
z.conserz[zxhx].t = contype(zeff)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "r":
|
|
||||||
err = z.conserz[zxhx].r.DecodeMsg(dc)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = dc.Skip()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = dc.Skip()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: EncodeMsg implements msgp.Encodable
|
|
||||||
func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) {
|
|
||||||
// map header, size 4
|
|
||||||
// write "keys"
|
|
||||||
err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = en.WriteArrayHeader(uint32(len(z.keys)))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zhct := range z.keys {
|
|
||||||
err = en.WriteUint16(z.keys[zhct])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// write "needCopyOnWrite"
|
|
||||||
err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite)))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zcua := range z.needCopyOnWrite {
|
|
||||||
err = en.WriteBool(z.needCopyOnWrite[zcua])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// write "copyOnWrite"
|
|
||||||
err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = en.WriteBool(z.copyOnWrite)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// write "conserz"
|
|
||||||
err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = en.WriteArrayHeader(uint32(len(z.conserz)))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zxhx := range z.conserz {
|
|
||||||
// map header, size 2
|
|
||||||
// write "t"
|
|
||||||
err = en.Append(0x82, 0xa1, 0x74)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = en.WriteUint8(uint8(z.conserz[zxhx].t))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// write "r"
|
|
||||||
err = en.Append(0xa1, 0x72)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = z.conserz[zxhx].r.EncodeMsg(en)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: MarshalMsg implements msgp.Marshaler
|
|
||||||
func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) {
|
|
||||||
o = msgp.Require(b, z.Msgsize())
|
|
||||||
// map header, size 4
|
|
||||||
// string "keys"
|
|
||||||
o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
|
|
||||||
o = msgp.AppendArrayHeader(o, uint32(len(z.keys)))
|
|
||||||
for zhct := range z.keys {
|
|
||||||
o = msgp.AppendUint16(o, z.keys[zhct])
|
|
||||||
}
|
|
||||||
// string "needCopyOnWrite"
|
|
||||||
o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
|
|
||||||
o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite)))
|
|
||||||
for zcua := range z.needCopyOnWrite {
|
|
||||||
o = msgp.AppendBool(o, z.needCopyOnWrite[zcua])
|
|
||||||
}
|
|
||||||
// string "copyOnWrite"
|
|
||||||
o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
|
|
||||||
o = msgp.AppendBool(o, z.copyOnWrite)
|
|
||||||
// string "conserz"
|
|
||||||
o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
|
|
||||||
o = msgp.AppendArrayHeader(o, uint32(len(z.conserz)))
|
|
||||||
for zxhx := range z.conserz {
|
|
||||||
// map header, size 2
|
|
||||||
// string "t"
|
|
||||||
o = append(o, 0x82, 0xa1, 0x74)
|
|
||||||
o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t))
|
|
||||||
// string "r"
|
|
||||||
o = append(o, 0xa1, 0x72)
|
|
||||||
o, err = z.conserz[zxhx].r.MarshalMsg(o)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
|
|
||||||
func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|
||||||
var field []byte
|
|
||||||
_ = field
|
|
||||||
var zrsw uint32
|
|
||||||
zrsw, bts, err = msgp.ReadMapHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zrsw > 0 {
|
|
||||||
zrsw--
|
|
||||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "keys":
|
|
||||||
var zxpk uint32
|
|
||||||
zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.keys) >= int(zxpk) {
|
|
||||||
z.keys = (z.keys)[:zxpk]
|
|
||||||
} else {
|
|
||||||
z.keys = make([]uint16, zxpk)
|
|
||||||
}
|
|
||||||
for zhct := range z.keys {
|
|
||||||
z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "needCopyOnWrite":
|
|
||||||
var zdnj uint32
|
|
||||||
zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.needCopyOnWrite) >= int(zdnj) {
|
|
||||||
z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj]
|
|
||||||
} else {
|
|
||||||
z.needCopyOnWrite = make([]bool, zdnj)
|
|
||||||
}
|
|
||||||
for zcua := range z.needCopyOnWrite {
|
|
||||||
z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "copyOnWrite":
|
|
||||||
z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "conserz":
|
|
||||||
var zobc uint32
|
|
||||||
zobc, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cap(z.conserz) >= int(zobc) {
|
|
||||||
z.conserz = (z.conserz)[:zobc]
|
|
||||||
} else {
|
|
||||||
z.conserz = make([]containerSerz, zobc)
|
|
||||||
}
|
|
||||||
for zxhx := range z.conserz {
|
|
||||||
var zsnv uint32
|
|
||||||
zsnv, bts, err = msgp.ReadMapHeaderBytes(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for zsnv > 0 {
|
|
||||||
zsnv--
|
|
||||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch msgp.UnsafeString(field) {
|
|
||||||
case "t":
|
|
||||||
{
|
|
||||||
var zkgt uint8
|
|
||||||
zkgt, bts, err = msgp.ReadUint8Bytes(bts)
|
|
||||||
z.conserz[zxhx].t = contype(zkgt)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "r":
|
|
||||||
bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
bts, err = msgp.Skip(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
bts, err = msgp.Skip(bts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o = bts
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
//
// Fixed header/key costs plus the per-element cost of each slice field.
func (z *roaringArray) Msgsize() (s int) {
	s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize
	for zxhx := range z.conserz {
		s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize()
	}
	return
}
|
|
2560
vendor/github.com/RoaringBitmap/roaring/runcontainer.go
generated
vendored
2560
vendor/github.com/RoaringBitmap/roaring/runcontainer.go
generated
vendored
File diff suppressed because it is too large
Load Diff
1126
vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go
generated
vendored
1126
vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go
generated
vendored
File diff suppressed because it is too large
Load Diff
70
vendor/github.com/RoaringBitmap/roaring/serialization.go
generated
vendored
70
vendor/github.com/RoaringBitmap/roaring/serialization.go
generated
vendored
@ -1,70 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/tinylib/msgp/msgp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writeTo for runContainer16 follows this
|
|
||||||
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
|
|
||||||
//
|
|
||||||
func (b *runContainer16) writeTo(stream io.Writer) (int, error) {
|
|
||||||
buf := make([]byte, 2+4*len(b.iv))
|
|
||||||
binary.LittleEndian.PutUint16(buf[0:], uint16(len(b.iv)))
|
|
||||||
for i, v := range b.iv {
|
|
||||||
binary.LittleEndian.PutUint16(buf[2+i*4:], v.start)
|
|
||||||
binary.LittleEndian.PutUint16(buf[2+2+i*4:], v.length)
|
|
||||||
}
|
|
||||||
return stream.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeToMsgpack serializes b as msgpack via the generated MarshalMsg
// and writes the result to stream, returning the bytes written.
func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) {
	bts, err := b.MarshalMsg(nil)
	if err != nil {
		return 0, err
	}
	return stream.Write(bts)
}
|
|
||||||
|
|
||||||
// readFromMsgpack decodes b from a msgpack stream. msgp.Decode does
// not report a byte count, so the count returned is always 0.
func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) {
	err := msgp.Decode(stream, b)
	return 0, err
}
|
|
||||||
|
|
||||||
// errCorruptedStream reports that a serialized runContainer16 payload
// was truncated or held an odd number of bytes.
var errCorruptedStream = errors.New("insufficient/odd number of stored bytes, corrupted stream detected")
|
|
||||||
|
|
||||||
func (b *runContainer16) readFrom(stream io.Reader) (int, error) {
|
|
||||||
b.iv = b.iv[:0]
|
|
||||||
b.card = 0
|
|
||||||
var numRuns uint16
|
|
||||||
err := binary.Read(stream, binary.LittleEndian, &numRuns)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
nr := int(numRuns)
|
|
||||||
encRun := make([]uint16, 2*nr)
|
|
||||||
by := make([]byte, 4*nr)
|
|
||||||
err = binary.Read(stream, binary.LittleEndian, &by)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
for i := range encRun {
|
|
||||||
if len(by) < 2 {
|
|
||||||
return 0, errCorruptedStream
|
|
||||||
}
|
|
||||||
encRun[i] = binary.LittleEndian.Uint16(by)
|
|
||||||
by = by[2:]
|
|
||||||
}
|
|
||||||
for i := 0; i < nr; i++ {
|
|
||||||
if i > 0 && b.iv[i-1].last() >= encRun[i*2] {
|
|
||||||
return 0, fmt.Errorf("error: stored runContainer had runs that were not in sorted order!! (b.iv[i-1=%v].last = %v >= encRun[i=%v] = %v)", i-1, b.iv[i-1].last(), i, encRun[i*2])
|
|
||||||
}
|
|
||||||
b.iv = append(b.iv, interval16{start: encRun[i*2], length: encRun[i*2+1]})
|
|
||||||
b.card += int64(encRun[i*2+1]) + 1
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
123
vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
generated
vendored
123
vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
generated
vendored
@ -1,123 +0,0 @@
|
|||||||
// +build !amd64,!386 appengine
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *arrayContainer) writeTo(stream io.Writer) (int, error) {
|
|
||||||
buf := make([]byte, 2*len(b.content))
|
|
||||||
for i, v := range b.content {
|
|
||||||
base := i * 2
|
|
||||||
buf[base] = byte(v)
|
|
||||||
buf[base+1] = byte(v >> 8)
|
|
||||||
}
|
|
||||||
return stream.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *arrayContainer) readFrom(stream io.Reader) (int, error) {
|
|
||||||
err := binary.Read(stream, binary.LittleEndian, b.content)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return 2 * len(b.content), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bitmapContainer) writeTo(stream io.Writer) (int, error) {
|
|
||||||
if b.cardinality <= arrayDefaultMaxSize {
|
|
||||||
return 0, errors.New("refusing to write bitmap container with cardinality of array container")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write set
|
|
||||||
buf := make([]byte, 8*len(b.bitmap))
|
|
||||||
for i, v := range b.bitmap {
|
|
||||||
base := i * 8
|
|
||||||
buf[base] = byte(v)
|
|
||||||
buf[base+1] = byte(v >> 8)
|
|
||||||
buf[base+2] = byte(v >> 16)
|
|
||||||
buf[base+3] = byte(v >> 24)
|
|
||||||
buf[base+4] = byte(v >> 32)
|
|
||||||
buf[base+5] = byte(v >> 40)
|
|
||||||
buf[base+6] = byte(v >> 48)
|
|
||||||
buf[base+7] = byte(v >> 56)
|
|
||||||
}
|
|
||||||
return stream.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bitmapContainer) readFrom(stream io.Reader) (int, error) {
|
|
||||||
err := binary.Read(stream, binary.LittleEndian, b.bitmap)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
b.computeCardinality()
|
|
||||||
return 8 * len(b.bitmap), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
|
|
||||||
by := make([]byte, len(bc.bitmap)*8)
|
|
||||||
for i := range bc.bitmap {
|
|
||||||
binary.LittleEndian.PutUint64(by[i*8:], bc.bitmap[i])
|
|
||||||
}
|
|
||||||
return by
|
|
||||||
}
|
|
||||||
|
|
||||||
func uint64SliceAsByteSlice(slice []uint64) []byte {
|
|
||||||
by := make([]byte, len(slice)*8)
|
|
||||||
|
|
||||||
for i, v := range slice {
|
|
||||||
binary.LittleEndian.PutUint64(by[i*8:], v)
|
|
||||||
}
|
|
||||||
|
|
||||||
return by
|
|
||||||
}
|
|
||||||
|
|
||||||
func byteSliceAsUint16Slice(slice []byte) []uint16 {
|
|
||||||
if len(slice)%2 != 0 {
|
|
||||||
panic("Slice size should be divisible by 2")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := make([]uint16, len(slice)/2)
|
|
||||||
|
|
||||||
for i := range b {
|
|
||||||
b[i] = binary.LittleEndian.Uint16(slice[2*i:])
|
|
||||||
}
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func byteSliceAsUint64Slice(slice []byte) []uint64 {
|
|
||||||
if len(slice)%8 != 0 {
|
|
||||||
panic("Slice size should be divisible by 8")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := make([]uint64, len(slice)/8)
|
|
||||||
|
|
||||||
for i := range b {
|
|
||||||
b[i] = binary.LittleEndian.Uint64(slice[8*i:])
|
|
||||||
}
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Converts a byte slice to a interval16 slice.
|
|
||||||
// The function assumes that the slice byte buffer is run container data
|
|
||||||
// encoded according to Roaring Format Spec
|
|
||||||
func byteSliceAsInterval16Slice(byteSlice []byte) []interval16 {
|
|
||||||
if len(byteSlice)%4 != 0 {
|
|
||||||
panic("Slice size should be divisible by 4")
|
|
||||||
}
|
|
||||||
|
|
||||||
intervalSlice := make([]interval16, len(byteSlice)/4)
|
|
||||||
|
|
||||||
for i := range intervalSlice {
|
|
||||||
intervalSlice[i] = interval16{
|
|
||||||
start: binary.LittleEndian.Uint16(byteSlice[i*4:]),
|
|
||||||
length: binary.LittleEndian.Uint16(byteSlice[i*4+2:]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return intervalSlice
|
|
||||||
}
|
|
117
vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
generated
vendored
117
vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
generated
vendored
@ -1,117 +0,0 @@
|
|||||||
// +build 386 amd64,!appengine
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (ac *arrayContainer) writeTo(stream io.Writer) (int, error) {
|
|
||||||
buf := uint16SliceAsByteSlice(ac.content)
|
|
||||||
return stream.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *bitmapContainer) writeTo(stream io.Writer) (int, error) {
|
|
||||||
if bc.cardinality <= arrayDefaultMaxSize {
|
|
||||||
return 0, errors.New("refusing to write bitmap container with cardinality of array container")
|
|
||||||
}
|
|
||||||
buf := uint64SliceAsByteSlice(bc.bitmap)
|
|
||||||
return stream.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// readFrom reads an arrayContainer from stream.
|
|
||||||
// PRE-REQUISITE: you must size the arrayContainer correctly (allocate b.content)
|
|
||||||
// *before* you call readFrom. We can't guess the size in the stream
|
|
||||||
// by this point.
|
|
||||||
func (ac *arrayContainer) readFrom(stream io.Reader) (int, error) {
|
|
||||||
buf := uint16SliceAsByteSlice(ac.content)
|
|
||||||
return io.ReadFull(stream, buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *bitmapContainer) readFrom(stream io.Reader) (int, error) {
|
|
||||||
buf := uint64SliceAsByteSlice(bc.bitmap)
|
|
||||||
n, err := io.ReadFull(stream, buf)
|
|
||||||
bc.computeCardinality()
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func uint64SliceAsByteSlice(slice []uint64) []byte {
|
|
||||||
// make a new slice header
|
|
||||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
|
|
||||||
|
|
||||||
// update its capacity and length
|
|
||||||
header.Len *= 8
|
|
||||||
header.Cap *= 8
|
|
||||||
|
|
||||||
// return it
|
|
||||||
return *(*[]byte)(unsafe.Pointer(&header))
|
|
||||||
}
|
|
||||||
|
|
||||||
func uint16SliceAsByteSlice(slice []uint16) []byte {
|
|
||||||
// make a new slice header
|
|
||||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
|
|
||||||
|
|
||||||
// update its capacity and length
|
|
||||||
header.Len *= 2
|
|
||||||
header.Cap *= 2
|
|
||||||
|
|
||||||
// return it
|
|
||||||
return *(*[]byte)(unsafe.Pointer(&header))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
|
|
||||||
return uint64SliceAsByteSlice(bc.bitmap)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deserialization code follows
|
|
||||||
|
|
||||||
func byteSliceAsUint16Slice(slice []byte) []uint16 {
|
|
||||||
if len(slice)%2 != 0 {
|
|
||||||
panic("Slice size should be divisible by 2")
|
|
||||||
}
|
|
||||||
|
|
||||||
// make a new slice header
|
|
||||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
|
|
||||||
|
|
||||||
// update its capacity and length
|
|
||||||
header.Len /= 2
|
|
||||||
header.Cap /= 2
|
|
||||||
|
|
||||||
// return it
|
|
||||||
return *(*[]uint16)(unsafe.Pointer(&header))
|
|
||||||
}
|
|
||||||
|
|
||||||
func byteSliceAsUint64Slice(slice []byte) []uint64 {
|
|
||||||
if len(slice)%8 != 0 {
|
|
||||||
panic("Slice size should be divisible by 8")
|
|
||||||
}
|
|
||||||
|
|
||||||
// make a new slice header
|
|
||||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
|
|
||||||
|
|
||||||
// update its capacity and length
|
|
||||||
header.Len /= 8
|
|
||||||
header.Cap /= 8
|
|
||||||
|
|
||||||
// return it
|
|
||||||
return *(*[]uint64)(unsafe.Pointer(&header))
|
|
||||||
}
|
|
||||||
|
|
||||||
func byteSliceAsInterval16Slice(slice []byte) []interval16 {
|
|
||||||
if len(slice)%4 != 0 {
|
|
||||||
panic("Slice size should be divisible by 4")
|
|
||||||
}
|
|
||||||
|
|
||||||
// make a new slice header
|
|
||||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
|
|
||||||
|
|
||||||
// update its capacity and length
|
|
||||||
header.Len /= 4
|
|
||||||
header.Cap /= 4
|
|
||||||
|
|
||||||
// return it
|
|
||||||
return *(*[]interval16)(unsafe.Pointer(&header))
|
|
||||||
}
|
|
21
vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go
generated
vendored
21
vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import "bytes"
|
|
||||||
|
|
||||||
func FuzzSerializationStream(data []byte) int {
|
|
||||||
newrb := NewBitmap()
|
|
||||||
if _, err := newrb.ReadFrom(bytes.NewReader(data)); err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func FuzzSerializationBuffer(data []byte) int {
|
|
||||||
newrb := NewBitmap()
|
|
||||||
if _, err := newrb.FromBuffer(data); err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return 1
|
|
||||||
}
|
|
609
vendor/github.com/RoaringBitmap/roaring/setutil.go
generated
vendored
609
vendor/github.com/RoaringBitmap/roaring/setutil.go
generated
vendored
@ -1,609 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
func equal(a, b []uint16) bool {
|
|
||||||
if len(a) != len(b) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func difference(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
|
||||||
if 0 == len(set2) {
|
|
||||||
for k := 0; k < len(set1); k++ {
|
|
||||||
buffer[k] = set1[k]
|
|
||||||
}
|
|
||||||
return len(set1)
|
|
||||||
}
|
|
||||||
if 0 == len(set1) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
pos := 0
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
buffer = buffer[:cap(buffer)]
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
buffer[pos] = s1
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
} else if s1 == s2 {
|
|
||||||
k1++
|
|
||||||
k2++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
for ; k1 < len(set1); k1++ {
|
|
||||||
buffer[pos] = set1[k1]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
} else { // if (val1>val2)
|
|
||||||
k2++
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
for ; k1 < len(set1); k1++ {
|
|
||||||
buffer[pos] = set1[k1]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
|
||||||
if 0 == len(set2) {
|
|
||||||
buffer = buffer[:len(set1)]
|
|
||||||
copy(buffer, set1[:])
|
|
||||||
return len(set1)
|
|
||||||
}
|
|
||||||
if 0 == len(set1) {
|
|
||||||
buffer = buffer[:len(set2)]
|
|
||||||
copy(buffer, set2[:])
|
|
||||||
return len(set2)
|
|
||||||
}
|
|
||||||
pos := 0
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
buffer = buffer[:cap(buffer)]
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
buffer[pos] = s1
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
for ; k2 < len(set2); k2++ {
|
|
||||||
buffer[pos] = set2[k2]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
} else if s1 == s2 {
|
|
||||||
k1++
|
|
||||||
k2++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
for ; k2 < len(set2); k2++ {
|
|
||||||
buffer[pos] = set2[k2]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
for ; k1 < len(set1); k1++ {
|
|
||||||
buffer[pos] = set1[k1]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
s2 = set2[k2]
|
|
||||||
} else { // if (val1>val2)
|
|
||||||
buffer[pos] = s2
|
|
||||||
pos++
|
|
||||||
k2++
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
for ; k1 < len(set1); k1++ {
|
|
||||||
buffer[pos] = set1[k1]
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
|
||||||
pos := 0
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
if 0 == len(set2) {
|
|
||||||
buffer = buffer[:len(set1)]
|
|
||||||
copy(buffer, set1[:])
|
|
||||||
return len(set1)
|
|
||||||
}
|
|
||||||
if 0 == len(set1) {
|
|
||||||
buffer = buffer[:len(set2)]
|
|
||||||
copy(buffer, set2[:])
|
|
||||||
return len(set2)
|
|
||||||
}
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
buffer = buffer[:cap(buffer)]
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
buffer[pos] = s1
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
copy(buffer[pos:], set2[k2:])
|
|
||||||
pos += len(set2) - k2
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
} else if s1 == s2 {
|
|
||||||
buffer[pos] = s1
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
k2++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
copy(buffer[pos:], set2[k2:])
|
|
||||||
pos += len(set2) - k2
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
copy(buffer[pos:], set1[k1:])
|
|
||||||
pos += len(set1) - k1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
s2 = set2[k2]
|
|
||||||
} else { // if (set1[k1]>set2[k2])
|
|
||||||
buffer[pos] = s2
|
|
||||||
pos++
|
|
||||||
k2++
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
copy(buffer[pos:], set1[k1:])
|
|
||||||
pos += len(set1) - k1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func union2by2Cardinality(set1 []uint16, set2 []uint16) int {
|
|
||||||
pos := 0
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
if 0 == len(set2) {
|
|
||||||
return len(set1)
|
|
||||||
}
|
|
||||||
if 0 == len(set1) {
|
|
||||||
return len(set2)
|
|
||||||
}
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
pos += len(set2) - k2
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
} else if s1 == s2 {
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
k2++
|
|
||||||
if k1 >= len(set1) {
|
|
||||||
pos += len(set2) - k2
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
pos += len(set1) - k1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
s2 = set2[k2]
|
|
||||||
} else { // if (set1[k1]>set2[k2])
|
|
||||||
pos++
|
|
||||||
k2++
|
|
||||||
if k2 >= len(set2) {
|
|
||||||
pos += len(set1) - k1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func intersection2by2(
|
|
||||||
set1 []uint16,
|
|
||||||
set2 []uint16,
|
|
||||||
buffer []uint16) int {
|
|
||||||
|
|
||||||
if len(set1)*64 < len(set2) {
|
|
||||||
return onesidedgallopingintersect2by2(set1, set2, buffer)
|
|
||||||
} else if len(set2)*64 < len(set1) {
|
|
||||||
return onesidedgallopingintersect2by2(set2, set1, buffer)
|
|
||||||
} else {
|
|
||||||
return localintersect2by2(set1, set2, buffer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func intersection2by2Cardinality(
|
|
||||||
set1 []uint16,
|
|
||||||
set2 []uint16) int {
|
|
||||||
|
|
||||||
if len(set1)*64 < len(set2) {
|
|
||||||
return onesidedgallopingintersect2by2Cardinality(set1, set2)
|
|
||||||
} else if len(set2)*64 < len(set1) {
|
|
||||||
return onesidedgallopingintersect2by2Cardinality(set2, set1)
|
|
||||||
} else {
|
|
||||||
return localintersect2by2Cardinality(set1, set2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func intersects2by2(
|
|
||||||
set1 []uint16,
|
|
||||||
set2 []uint16) bool {
|
|
||||||
// could be optimized if one set is much larger than the other one
|
|
||||||
if (0 == len(set1)) || (0 == len(set2)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
mainwhile:
|
|
||||||
for {
|
|
||||||
|
|
||||||
if s2 < s1 {
|
|
||||||
for {
|
|
||||||
k2++
|
|
||||||
if k2 == len(set2) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
if s2 >= s1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s1 < s2 {
|
|
||||||
for {
|
|
||||||
k1++
|
|
||||||
if k1 == len(set1) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
if s1 >= s2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// (set2[k2] == set1[k1])
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func localintersect2by2(
|
|
||||||
set1 []uint16,
|
|
||||||
set2 []uint16,
|
|
||||||
buffer []uint16) int {
|
|
||||||
|
|
||||||
if (0 == len(set1)) || (0 == len(set2)) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
pos := 0
|
|
||||||
buffer = buffer[:cap(buffer)]
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
mainwhile:
|
|
||||||
for {
|
|
||||||
if s2 < s1 {
|
|
||||||
for {
|
|
||||||
k2++
|
|
||||||
if k2 == len(set2) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
if s2 >= s1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s1 < s2 {
|
|
||||||
for {
|
|
||||||
k1++
|
|
||||||
if k1 == len(set1) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
if s1 >= s2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// (set2[k2] == set1[k1])
|
|
||||||
buffer[pos] = s1
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 == len(set1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
k2++
|
|
||||||
if k2 == len(set2) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func localintersect2by2Cardinality(
|
|
||||||
set1 []uint16,
|
|
||||||
set2 []uint16) int {
|
|
||||||
|
|
||||||
if (0 == len(set1)) || (0 == len(set2)) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
pos := 0
|
|
||||||
s1 := set1[k1]
|
|
||||||
s2 := set2[k2]
|
|
||||||
mainwhile:
|
|
||||||
for {
|
|
||||||
if s2 < s1 {
|
|
||||||
for {
|
|
||||||
k2++
|
|
||||||
if k2 == len(set2) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
if s2 >= s1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s1 < s2 {
|
|
||||||
for {
|
|
||||||
k1++
|
|
||||||
if k1 == len(set1) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
if s1 >= s2 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// (set2[k2] == set1[k1])
|
|
||||||
pos++
|
|
||||||
k1++
|
|
||||||
if k1 == len(set1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s1 = set1[k1]
|
|
||||||
k2++
|
|
||||||
if k2 == len(set2) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = set2[k2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func advanceUntil(
|
|
||||||
array []uint16,
|
|
||||||
pos int,
|
|
||||||
length int,
|
|
||||||
min uint16) int {
|
|
||||||
lower := pos + 1
|
|
||||||
|
|
||||||
if lower >= length || array[lower] >= min {
|
|
||||||
return lower
|
|
||||||
}
|
|
||||||
|
|
||||||
spansize := 1
|
|
||||||
|
|
||||||
for lower+spansize < length && array[lower+spansize] < min {
|
|
||||||
spansize *= 2
|
|
||||||
}
|
|
||||||
var upper int
|
|
||||||
if lower+spansize < length {
|
|
||||||
upper = lower + spansize
|
|
||||||
} else {
|
|
||||||
upper = length - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if array[upper] == min {
|
|
||||||
return upper
|
|
||||||
}
|
|
||||||
|
|
||||||
if array[upper] < min {
|
|
||||||
// means
|
|
||||||
// array
|
|
||||||
// has no
|
|
||||||
// item
|
|
||||||
// >= min
|
|
||||||
// pos = array.length;
|
|
||||||
return length
|
|
||||||
}
|
|
||||||
|
|
||||||
// we know that the next-smallest span was too small
|
|
||||||
lower += (spansize >> 1)
|
|
||||||
|
|
||||||
mid := 0
|
|
||||||
for lower+1 != upper {
|
|
||||||
mid = (lower + upper) >> 1
|
|
||||||
if array[mid] == min {
|
|
||||||
return mid
|
|
||||||
} else if array[mid] < min {
|
|
||||||
lower = mid
|
|
||||||
} else {
|
|
||||||
upper = mid
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return upper
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func onesidedgallopingintersect2by2(
|
|
||||||
smallset []uint16,
|
|
||||||
largeset []uint16,
|
|
||||||
buffer []uint16) int {
|
|
||||||
|
|
||||||
if 0 == len(smallset) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
buffer = buffer[:cap(buffer)]
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
pos := 0
|
|
||||||
s1 := largeset[k1]
|
|
||||||
s2 := smallset[k2]
|
|
||||||
mainwhile:
|
|
||||||
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
k1 = advanceUntil(largeset, k1, len(largeset), s2)
|
|
||||||
if k1 == len(largeset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = largeset[k1]
|
|
||||||
}
|
|
||||||
if s2 < s1 {
|
|
||||||
k2++
|
|
||||||
if k2 == len(smallset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s2 = smallset[k2]
|
|
||||||
} else {
|
|
||||||
|
|
||||||
buffer[pos] = s2
|
|
||||||
pos++
|
|
||||||
k2++
|
|
||||||
if k2 == len(smallset) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = smallset[k2]
|
|
||||||
k1 = advanceUntil(largeset, k1, len(largeset), s2)
|
|
||||||
if k1 == len(largeset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = largeset[k1]
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func onesidedgallopingintersect2by2Cardinality(
|
|
||||||
smallset []uint16,
|
|
||||||
largeset []uint16) int {
|
|
||||||
|
|
||||||
if 0 == len(smallset) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
k1 := 0
|
|
||||||
k2 := 0
|
|
||||||
pos := 0
|
|
||||||
s1 := largeset[k1]
|
|
||||||
s2 := smallset[k2]
|
|
||||||
mainwhile:
|
|
||||||
|
|
||||||
for {
|
|
||||||
if s1 < s2 {
|
|
||||||
k1 = advanceUntil(largeset, k1, len(largeset), s2)
|
|
||||||
if k1 == len(largeset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = largeset[k1]
|
|
||||||
}
|
|
||||||
if s2 < s1 {
|
|
||||||
k2++
|
|
||||||
if k2 == len(smallset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s2 = smallset[k2]
|
|
||||||
} else {
|
|
||||||
|
|
||||||
pos++
|
|
||||||
k2++
|
|
||||||
if k2 == len(smallset) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s2 = smallset[k2]
|
|
||||||
k1 = advanceUntil(largeset, k1, len(largeset), s2)
|
|
||||||
if k1 == len(largeset) {
|
|
||||||
break mainwhile
|
|
||||||
}
|
|
||||||
s1 = largeset[k1]
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func binarySearch(array []uint16, ikey uint16) int {
|
|
||||||
low := 0
|
|
||||||
high := len(array) - 1
|
|
||||||
for low+16 <= high {
|
|
||||||
middleIndex := int(uint32(low+high) >> 1)
|
|
||||||
middleValue := array[middleIndex]
|
|
||||||
if middleValue < ikey {
|
|
||||||
low = middleIndex + 1
|
|
||||||
} else if middleValue > ikey {
|
|
||||||
high = middleIndex - 1
|
|
||||||
} else {
|
|
||||||
return middleIndex
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for ; low <= high; low++ {
|
|
||||||
val := array[low]
|
|
||||||
if val >= ikey {
|
|
||||||
if val == ikey {
|
|
||||||
return low
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -(low + 1)
|
|
||||||
}
|
|
36
vendor/github.com/RoaringBitmap/roaring/shortiterator.go
generated
vendored
36
vendor/github.com/RoaringBitmap/roaring/shortiterator.go
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
type shortIterable interface {
|
|
||||||
hasNext() bool
|
|
||||||
next() uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
type shortIterator struct {
|
|
||||||
slice []uint16
|
|
||||||
loc int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si *shortIterator) hasNext() bool {
|
|
||||||
return si.loc < len(si.slice)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si *shortIterator) next() uint16 {
|
|
||||||
a := si.slice[si.loc]
|
|
||||||
si.loc++
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
type reverseIterator struct {
|
|
||||||
slice []uint16
|
|
||||||
loc int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si *reverseIterator) hasNext() bool {
|
|
||||||
return si.loc >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si *reverseIterator) next() uint16 {
|
|
||||||
a := si.slice[si.loc]
|
|
||||||
si.loc--
|
|
||||||
return a
|
|
||||||
}
|
|
383
vendor/github.com/RoaringBitmap/roaring/smat.go
generated
vendored
383
vendor/github.com/RoaringBitmap/roaring/smat.go
generated
vendored
@ -1,383 +0,0 @@
|
|||||||
// +build gofuzz
|
|
||||||
|
|
||||||
/*
|
|
||||||
# Instructions for smat testing for roaring
|
|
||||||
|
|
||||||
[smat](https://github.com/mschoch/smat) is a framework that provides
|
|
||||||
state machine assisted fuzz testing.
|
|
||||||
|
|
||||||
To run the smat tests for roaring...
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
$ go get github.com/dvyukov/go-fuzz/go-fuzz
|
|
||||||
$ go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
|
||||||
|
|
||||||
## Steps
|
|
||||||
|
|
||||||
1. Generate initial smat corpus:
|
|
||||||
```
|
|
||||||
go test -tags=gofuzz -run=TestGenerateSmatCorpus
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Build go-fuzz test program with instrumentation:
|
|
||||||
```
|
|
||||||
go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Run go-fuzz:
|
|
||||||
```
|
|
||||||
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
|
|
||||||
```
|
|
||||||
|
|
||||||
You should see output like...
|
|
||||||
```
|
|
||||||
2016/09/16 13:58:35 slaves: 8, corpus: 1 (3s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 3s
|
|
||||||
2016/09/16 13:58:38 slaves: 8, corpus: 1 (6s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 6s
|
|
||||||
2016/09/16 13:58:41 slaves: 8, corpus: 1 (9s ago), crashers: 0, restarts: 1/44, execs: 44 (5/sec), cover: 0, uptime: 9s
|
|
||||||
2016/09/16 13:58:44 slaves: 8, corpus: 1 (12s ago), crashers: 0, restarts: 1/45, execs: 45 (4/sec), cover: 0, uptime: 12s
|
|
||||||
2016/09/16 13:58:47 slaves: 8, corpus: 1 (15s ago), crashers: 0, restarts: 1/46, execs: 46 (3/sec), cover: 0, uptime: 15s
|
|
||||||
2016/09/16 13:58:50 slaves: 8, corpus: 1 (18s ago), crashers: 0, restarts: 1/47, execs: 47 (3/sec), cover: 0, uptime: 18s
|
|
||||||
2016/09/16 13:58:53 slaves: 8, corpus: 1 (21s ago), crashers: 0, restarts: 1/63, execs: 63 (3/sec), cover: 0, uptime: 21s
|
|
||||||
2016/09/16 13:58:56 slaves: 8, corpus: 1 (24s ago), crashers: 0, restarts: 1/65, execs: 65 (3/sec), cover: 0, uptime: 24s
|
|
||||||
2016/09/16 13:58:59 slaves: 8, corpus: 1 (27s ago), crashers: 0, restarts: 1/66, execs: 66 (2/sec), cover: 0, uptime: 27s
|
|
||||||
2016/09/16 13:59:02 slaves: 8, corpus: 1 (30s ago), crashers: 0, restarts: 1/67, execs: 67 (2/sec), cover: 0, uptime: 30s
|
|
||||||
2016/09/16 13:59:05 slaves: 8, corpus: 1 (33s ago), crashers: 0, restarts: 1/83, execs: 83 (3/sec), cover: 0, uptime: 33s
|
|
||||||
2016/09/16 13:59:08 slaves: 8, corpus: 1 (36s ago), crashers: 0, restarts: 1/84, execs: 84 (2/sec), cover: 0, uptime: 36s
|
|
||||||
2016/09/16 13:59:11 slaves: 8, corpus: 2 (0s ago), crashers: 0, restarts: 1/85, execs: 85 (2/sec), cover: 0, uptime: 39s
|
|
||||||
2016/09/16 13:59:14 slaves: 8, corpus: 17 (2s ago), crashers: 0, restarts: 1/86, execs: 86 (2/sec), cover: 480, uptime: 42s
|
|
||||||
2016/09/16 13:59:17 slaves: 8, corpus: 17 (5s ago), crashers: 0, restarts: 1/66, execs: 132 (3/sec), cover: 487, uptime: 45s
|
|
||||||
2016/09/16 13:59:20 slaves: 8, corpus: 17 (8s ago), crashers: 0, restarts: 1/440, execs: 2645 (55/sec), cover: 487, uptime: 48s
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Let it run, and if the # of crashers is > 0, check out the reports in
|
|
||||||
the workdir where you should be able to find the panic goroutine stack
|
|
||||||
traces.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"github.com/mschoch/smat"
|
|
||||||
"github.com/willf/bitset"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fuzz test using state machine driven by byte stream.
|
|
||||||
func FuzzSmat(data []byte) int {
|
|
||||||
return smat.Fuzz(&smatContext{}, smat.ActionID('S'), smat.ActionID('T'),
|
|
||||||
smatActionMap, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
var smatDebug = false
|
|
||||||
|
|
||||||
func smatLog(prefix, format string, args ...interface{}) {
|
|
||||||
if smatDebug {
|
|
||||||
fmt.Print(prefix)
|
|
||||||
fmt.Printf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type smatContext struct {
|
|
||||||
pairs []*smatPair
|
|
||||||
|
|
||||||
// Two registers, x & y.
|
|
||||||
x int
|
|
||||||
y int
|
|
||||||
|
|
||||||
actions int
|
|
||||||
}
|
|
||||||
|
|
||||||
type smatPair struct {
|
|
||||||
bm *Bitmap
|
|
||||||
bs *bitset.BitSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------------
|
|
||||||
|
|
||||||
var smatActionMap = smat.ActionMap{
|
|
||||||
smat.ActionID('X'): smatAction("x++", smatWrap(func(c *smatContext) { c.x++ })),
|
|
||||||
smat.ActionID('x'): smatAction("x--", smatWrap(func(c *smatContext) { c.x-- })),
|
|
||||||
smat.ActionID('Y'): smatAction("y++", smatWrap(func(c *smatContext) { c.y++ })),
|
|
||||||
smat.ActionID('y'): smatAction("y--", smatWrap(func(c *smatContext) { c.y-- })),
|
|
||||||
smat.ActionID('*'): smatAction("x*y", smatWrap(func(c *smatContext) { c.x = c.x * c.y })),
|
|
||||||
smat.ActionID('<'): smatAction("x<<", smatWrap(func(c *smatContext) { c.x = c.x << 1 })),
|
|
||||||
|
|
||||||
smat.ActionID('^'): smatAction("swap", smatWrap(func(c *smatContext) { c.x, c.y = c.y, c.x })),
|
|
||||||
|
|
||||||
smat.ActionID('['): smatAction(" pushPair", smatWrap(smatPushPair)),
|
|
||||||
smat.ActionID(']'): smatAction(" popPair", smatWrap(smatPopPair)),
|
|
||||||
|
|
||||||
smat.ActionID('B'): smatAction(" setBit", smatWrap(smatSetBit)),
|
|
||||||
smat.ActionID('b'): smatAction(" removeBit", smatWrap(smatRemoveBit)),
|
|
||||||
|
|
||||||
smat.ActionID('o'): smatAction(" or", smatWrap(smatOr)),
|
|
||||||
smat.ActionID('a'): smatAction(" and", smatWrap(smatAnd)),
|
|
||||||
|
|
||||||
smat.ActionID('#'): smatAction(" cardinality", smatWrap(smatCardinality)),
|
|
||||||
|
|
||||||
smat.ActionID('O'): smatAction(" orCardinality", smatWrap(smatOrCardinality)),
|
|
||||||
smat.ActionID('A'): smatAction(" andCardinality", smatWrap(smatAndCardinality)),
|
|
||||||
|
|
||||||
smat.ActionID('c'): smatAction(" clear", smatWrap(smatClear)),
|
|
||||||
smat.ActionID('r'): smatAction(" runOptimize", smatWrap(smatRunOptimize)),
|
|
||||||
|
|
||||||
smat.ActionID('e'): smatAction(" isEmpty", smatWrap(smatIsEmpty)),
|
|
||||||
|
|
||||||
smat.ActionID('i'): smatAction(" intersects", smatWrap(smatIntersects)),
|
|
||||||
|
|
||||||
smat.ActionID('f'): smatAction(" flip", smatWrap(smatFlip)),
|
|
||||||
|
|
||||||
smat.ActionID('-'): smatAction(" difference", smatWrap(smatDifference)),
|
|
||||||
}
|
|
||||||
|
|
||||||
var smatRunningPercentActions []smat.PercentAction
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
var ids []int
|
|
||||||
for actionId := range smatActionMap {
|
|
||||||
ids = append(ids, int(actionId))
|
|
||||||
}
|
|
||||||
sort.Ints(ids)
|
|
||||||
|
|
||||||
pct := 100 / len(smatActionMap)
|
|
||||||
for _, actionId := range ids {
|
|
||||||
smatRunningPercentActions = append(smatRunningPercentActions,
|
|
||||||
smat.PercentAction{pct, smat.ActionID(actionId)})
|
|
||||||
}
|
|
||||||
|
|
||||||
smatActionMap[smat.ActionID('S')] = smatAction("SETUP", smatSetupFunc)
|
|
||||||
smatActionMap[smat.ActionID('T')] = smatAction("TEARDOWN", smatTeardownFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We only have one smat state: running.
|
|
||||||
func smatRunning(next byte) smat.ActionID {
|
|
||||||
return smat.PercentExecute(next, smatRunningPercentActions...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatAction(name string, f func(ctx smat.Context) (smat.State, error)) func(smat.Context) (smat.State, error) {
|
|
||||||
return func(ctx smat.Context) (smat.State, error) {
|
|
||||||
c := ctx.(*smatContext)
|
|
||||||
c.actions++
|
|
||||||
|
|
||||||
smatLog(" ", "%s\n", name)
|
|
||||||
|
|
||||||
return f(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates an smat action func based on a simple callback.
|
|
||||||
func smatWrap(cb func(c *smatContext)) func(smat.Context) (next smat.State, err error) {
|
|
||||||
return func(ctx smat.Context) (next smat.State, err error) {
|
|
||||||
c := ctx.(*smatContext)
|
|
||||||
cb(c)
|
|
||||||
return smatRunning, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Invokes a callback function with the input v bounded to len(c.pairs).
|
|
||||||
func (c *smatContext) withPair(v int, cb func(*smatPair)) {
|
|
||||||
if len(c.pairs) > 0 {
|
|
||||||
if v < 0 {
|
|
||||||
v = -v
|
|
||||||
}
|
|
||||||
v = v % len(c.pairs)
|
|
||||||
cb(c.pairs[v])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------------
|
|
||||||
|
|
||||||
func smatSetupFunc(ctx smat.Context) (next smat.State, err error) {
|
|
||||||
return smatRunning, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatTeardownFunc(ctx smat.Context) (next smat.State, err error) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------------
|
|
||||||
|
|
||||||
func smatPushPair(c *smatContext) {
|
|
||||||
c.pairs = append(c.pairs, &smatPair{
|
|
||||||
bm: NewBitmap(),
|
|
||||||
bs: bitset.New(100),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatPopPair(c *smatContext) {
|
|
||||||
if len(c.pairs) > 0 {
|
|
||||||
c.pairs = c.pairs[0 : len(c.pairs)-1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatSetBit(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(p *smatPair) {
|
|
||||||
y := uint32(c.y)
|
|
||||||
p.bm.AddInt(int(y))
|
|
||||||
p.bs.Set(uint(y))
|
|
||||||
p.checkEquals()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatRemoveBit(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(p *smatPair) {
|
|
||||||
y := uint32(c.y)
|
|
||||||
p.bm.Remove(y)
|
|
||||||
p.bs.Clear(uint(y))
|
|
||||||
p.checkEquals()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatAnd(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
px.bm.And(py.bm)
|
|
||||||
px.bs = px.bs.Intersection(py.bs)
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatOr(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
px.bm.Or(py.bm)
|
|
||||||
px.bs = px.bs.Union(py.bs)
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatAndCardinality(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
c0 := px.bm.AndCardinality(py.bm)
|
|
||||||
c1 := px.bs.IntersectionCardinality(py.bs)
|
|
||||||
if c0 != uint64(c1) {
|
|
||||||
panic("expected same add cardinality")
|
|
||||||
}
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatOrCardinality(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
c0 := px.bm.OrCardinality(py.bm)
|
|
||||||
c1 := px.bs.UnionCardinality(py.bs)
|
|
||||||
if c0 != uint64(c1) {
|
|
||||||
panic("expected same or cardinality")
|
|
||||||
}
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatRunOptimize(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
px.bm.RunOptimize()
|
|
||||||
px.checkEquals()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatClear(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
px.bm.Clear()
|
|
||||||
px.bs = px.bs.ClearAll()
|
|
||||||
px.checkEquals()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatCardinality(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c0 := px.bm.GetCardinality()
|
|
||||||
c1 := px.bs.Count()
|
|
||||||
if c0 != uint64(c1) {
|
|
||||||
panic("expected same cardinality")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatIsEmpty(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c0 := px.bm.IsEmpty()
|
|
||||||
c1 := px.bs.None()
|
|
||||||
if c0 != c1 {
|
|
||||||
panic("expected same is empty")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatIntersects(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
v0 := px.bm.Intersects(py.bm)
|
|
||||||
v1 := px.bs.IntersectionCardinality(py.bs) > 0
|
|
||||||
if v0 != v1 {
|
|
||||||
panic("intersects not equal")
|
|
||||||
}
|
|
||||||
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatFlip(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(p *smatPair) {
|
|
||||||
y := uint32(c.y)
|
|
||||||
p.bm.Flip(uint64(y), uint64(y)+1)
|
|
||||||
p.bs = p.bs.Flip(uint(y))
|
|
||||||
p.checkEquals()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func smatDifference(c *smatContext) {
|
|
||||||
c.withPair(c.x, func(px *smatPair) {
|
|
||||||
c.withPair(c.y, func(py *smatPair) {
|
|
||||||
px.bm.AndNot(py.bm)
|
|
||||||
px.bs = px.bs.Difference(py.bs)
|
|
||||||
px.checkEquals()
|
|
||||||
py.checkEquals()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *smatPair) checkEquals() {
|
|
||||||
if !p.equalsBitSet(p.bs, p.bm) {
|
|
||||||
panic("bitset mismatch")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *smatPair) equalsBitSet(a *bitset.BitSet, b *Bitmap) bool {
|
|
||||||
for i, e := a.NextSet(0); e; i, e = a.NextSet(i + 1) {
|
|
||||||
if !b.ContainsInt(int(i)) {
|
|
||||||
fmt.Printf("in a bitset, not b bitmap, i: %d\n", i)
|
|
||||||
fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
|
|
||||||
a.String(), b.String())
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
i := b.Iterator()
|
|
||||||
for i.HasNext() {
|
|
||||||
v := i.Next()
|
|
||||||
if !a.Test(uint(v)) {
|
|
||||||
fmt.Printf("in b bitmap, not a bitset, v: %d\n", v)
|
|
||||||
fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
|
|
||||||
a.String(), b.String())
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
325
vendor/github.com/RoaringBitmap/roaring/util.go
generated
vendored
325
vendor/github.com/RoaringBitmap/roaring/util.go
generated
vendored
@ -1,325 +0,0 @@
|
|||||||
package roaring
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
arrayDefaultMaxSize = 4096 // containers with 4096 or fewer integers should be array containers.
|
|
||||||
arrayLazyLowerBound = 1024
|
|
||||||
maxCapacity = 1 << 16
|
|
||||||
serialCookieNoRunContainer = 12346 // only arrays and bitmaps
|
|
||||||
invalidCardinality = -1
|
|
||||||
serialCookie = 12347 // runs, arrays, and bitmaps
|
|
||||||
noOffsetThreshold = 4
|
|
||||||
|
|
||||||
// MaxUint32 is the largest uint32 value.
|
|
||||||
MaxUint32 = 4294967295
|
|
||||||
|
|
||||||
// MaxRange is One more than the maximum allowed bitmap bit index. For use as an upper
|
|
||||||
// bound for ranges.
|
|
||||||
MaxRange uint64 = MaxUint32 + 1
|
|
||||||
|
|
||||||
// MaxUint16 is the largest 16 bit unsigned int.
|
|
||||||
// This is the largest value an interval16 can store.
|
|
||||||
MaxUint16 = 65535
|
|
||||||
|
|
||||||
// Compute wordSizeInBytes, the size of a word in bytes.
|
|
||||||
_m = ^uint64(0)
|
|
||||||
_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
|
|
||||||
wordSizeInBytes = 1 << _logS
|
|
||||||
|
|
||||||
// other constants used in ctz_generic.go
|
|
||||||
wordSizeInBits = wordSizeInBytes << 3 // word size in bits
|
|
||||||
)
|
|
||||||
|
|
||||||
const maxWord = 1<<wordSizeInBits - 1
|
|
||||||
|
|
||||||
// doesn't apply to runContainers
|
|
||||||
func getSizeInBytesFromCardinality(card int) int {
|
|
||||||
if card > arrayDefaultMaxSize {
|
|
||||||
// bitmapContainer
|
|
||||||
return maxCapacity / 8
|
|
||||||
}
|
|
||||||
// arrayContainer
|
|
||||||
return 2 * card
|
|
||||||
}
|
|
||||||
|
|
||||||
func fill(arr []uint64, val uint64) {
|
|
||||||
for i := range arr {
|
|
||||||
arr[i] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func fillRange(arr []uint64, start, end int, val uint64) {
|
|
||||||
for i := start; i < end; i++ {
|
|
||||||
arr[i] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func fillArrayAND(container []uint16, bitmap1, bitmap2 []uint64) {
|
|
||||||
if len(bitmap1) != len(bitmap2) {
|
|
||||||
panic("array lengths don't match")
|
|
||||||
}
|
|
||||||
// TODO: rewrite in assembly
|
|
||||||
pos := 0
|
|
||||||
for k := range bitmap1 {
|
|
||||||
bitset := bitmap1[k] & bitmap2[k]
|
|
||||||
for bitset != 0 {
|
|
||||||
t := bitset & -bitset
|
|
||||||
container[pos] = uint16((k*64 + int(popcount(t-1))))
|
|
||||||
pos = pos + 1
|
|
||||||
bitset ^= t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func fillArrayANDNOT(container []uint16, bitmap1, bitmap2 []uint64) {
|
|
||||||
if len(bitmap1) != len(bitmap2) {
|
|
||||||
panic("array lengths don't match")
|
|
||||||
}
|
|
||||||
// TODO: rewrite in assembly
|
|
||||||
pos := 0
|
|
||||||
for k := range bitmap1 {
|
|
||||||
bitset := bitmap1[k] &^ bitmap2[k]
|
|
||||||
for bitset != 0 {
|
|
||||||
t := bitset & -bitset
|
|
||||||
container[pos] = uint16((k*64 + int(popcount(t-1))))
|
|
||||||
pos = pos + 1
|
|
||||||
bitset ^= t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func fillArrayXOR(container []uint16, bitmap1, bitmap2 []uint64) {
|
|
||||||
if len(bitmap1) != len(bitmap2) {
|
|
||||||
panic("array lengths don't match")
|
|
||||||
}
|
|
||||||
// TODO: rewrite in assembly
|
|
||||||
pos := 0
|
|
||||||
for k := 0; k < len(bitmap1); k++ {
|
|
||||||
bitset := bitmap1[k] ^ bitmap2[k]
|
|
||||||
for bitset != 0 {
|
|
||||||
t := bitset & -bitset
|
|
||||||
container[pos] = uint16((k*64 + int(popcount(t-1))))
|
|
||||||
pos = pos + 1
|
|
||||||
bitset ^= t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func highbits(x uint32) uint16 {
|
|
||||||
return uint16(x >> 16)
|
|
||||||
}
|
|
||||||
func lowbits(x uint32) uint16 {
|
|
||||||
return uint16(x & 0xFFFF)
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxLowBit = 0xFFFF
|
|
||||||
|
|
||||||
func flipBitmapRange(bitmap []uint64, start int, end int) {
|
|
||||||
if start >= end {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
firstword := start / 64
|
|
||||||
endword := (end - 1) / 64
|
|
||||||
bitmap[firstword] ^= ^(^uint64(0) << uint(start%64))
|
|
||||||
for i := firstword; i < endword; i++ {
|
|
||||||
bitmap[i] = ^bitmap[i]
|
|
||||||
}
|
|
||||||
bitmap[endword] ^= ^uint64(0) >> (uint(-end) % 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resetBitmapRange(bitmap []uint64, start int, end int) {
|
|
||||||
if start >= end {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
firstword := start / 64
|
|
||||||
endword := (end - 1) / 64
|
|
||||||
if firstword == endword {
|
|
||||||
bitmap[firstword] &= ^((^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64)))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bitmap[firstword] &= ^(^uint64(0) << uint(start%64))
|
|
||||||
for i := firstword + 1; i < endword; i++ {
|
|
||||||
bitmap[i] = 0
|
|
||||||
}
|
|
||||||
bitmap[endword] &= ^(^uint64(0) >> (uint(-end) % 64))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func setBitmapRange(bitmap []uint64, start int, end int) {
|
|
||||||
if start >= end {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
firstword := start / 64
|
|
||||||
endword := (end - 1) / 64
|
|
||||||
if firstword == endword {
|
|
||||||
bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bitmap[firstword] |= ^uint64(0) << uint(start%64)
|
|
||||||
for i := firstword + 1; i < endword; i++ {
|
|
||||||
bitmap[i] = ^uint64(0)
|
|
||||||
}
|
|
||||||
bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
func flipBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
|
|
||||||
before := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
flipBitmapRange(bitmap, start, end)
|
|
||||||
after := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
return int(after - before)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resetBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
|
|
||||||
before := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
resetBitmapRange(bitmap, start, end)
|
|
||||||
after := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
return int(after - before)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
|
|
||||||
before := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
setBitmapRange(bitmap, start, end)
|
|
||||||
after := wordCardinalityForBitmapRange(bitmap, start, end)
|
|
||||||
return int(after - before)
|
|
||||||
}
|
|
||||||
|
|
||||||
func wordCardinalityForBitmapRange(bitmap []uint64, start int, end int) uint64 {
|
|
||||||
answer := uint64(0)
|
|
||||||
if start >= end {
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
firstword := start / 64
|
|
||||||
endword := (end - 1) / 64
|
|
||||||
for i := firstword; i <= endword; i++ {
|
|
||||||
answer += popcount(bitmap[i])
|
|
||||||
}
|
|
||||||
return answer
|
|
||||||
}
|
|
||||||
|
|
||||||
func selectBitPosition(w uint64, j int) int {
|
|
||||||
seen := 0
|
|
||||||
|
|
||||||
// Divide 64bit
|
|
||||||
part := w & 0xFFFFFFFF
|
|
||||||
n := popcount(part)
|
|
||||||
if n <= uint64(j) {
|
|
||||||
part = w >> 32
|
|
||||||
seen += 32
|
|
||||||
j -= int(n)
|
|
||||||
}
|
|
||||||
w = part
|
|
||||||
|
|
||||||
// Divide 32bit
|
|
||||||
part = w & 0xFFFF
|
|
||||||
n = popcount(part)
|
|
||||||
if n <= uint64(j) {
|
|
||||||
part = w >> 16
|
|
||||||
seen += 16
|
|
||||||
j -= int(n)
|
|
||||||
}
|
|
||||||
w = part
|
|
||||||
|
|
||||||
// Divide 16bit
|
|
||||||
part = w & 0xFF
|
|
||||||
n = popcount(part)
|
|
||||||
if n <= uint64(j) {
|
|
||||||
part = w >> 8
|
|
||||||
seen += 8
|
|
||||||
j -= int(n)
|
|
||||||
}
|
|
||||||
w = part
|
|
||||||
|
|
||||||
// Lookup in final byte
|
|
||||||
var counter uint
|
|
||||||
for counter = 0; counter < 8; counter++ {
|
|
||||||
j -= int((w >> counter) & 1)
|
|
||||||
if j < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return seen + int(counter)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func panicOn(err error) {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ph struct {
|
|
||||||
orig int
|
|
||||||
rand int
|
|
||||||
}
|
|
||||||
|
|
||||||
type pha []ph
|
|
||||||
|
|
||||||
func (p pha) Len() int { return len(p) }
|
|
||||||
func (p pha) Less(i, j int) bool { return p[i].rand < p[j].rand }
|
|
||||||
func (p pha) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
|
||||||
|
|
||||||
func getRandomPermutation(n int) []int {
|
|
||||||
r := make([]ph, n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
r[i].orig = i
|
|
||||||
r[i].rand = rand.Intn(1 << 29)
|
|
||||||
}
|
|
||||||
sort.Sort(pha(r))
|
|
||||||
m := make([]int, n)
|
|
||||||
for i := range m {
|
|
||||||
m[i] = r[i].orig
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func minOfInt(a, b int) int {
|
|
||||||
if a < b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxOfInt(a, b int) int {
|
|
||||||
if a > b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxOfUint16(a, b uint16) uint16 {
|
|
||||||
if a > b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func minOfUint16(a, b uint16) uint16 {
|
|
||||||
if a < b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxInt(a, b int) int {
|
|
||||||
if a > b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxUint16(a, b uint16) uint16 {
|
|
||||||
if a > b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func minUint16(a, b uint16) uint16 {
|
|
||||||
if a < b {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
362
vendor/github.com/anacrolix/dht/LICENSE
generated
vendored
362
vendor/github.com/anacrolix/dht/LICENSE
generated
vendored
@ -1,362 +0,0 @@
|
|||||||
Mozilla Public License, version 2.0
|
|
||||||
|
|
||||||
1. Definitions
|
|
||||||
|
|
||||||
1.1. "Contributor"
|
|
||||||
|
|
||||||
means each individual or legal entity that creates, contributes to the
|
|
||||||
creation of, or owns Covered Software.
|
|
||||||
|
|
||||||
1.2. "Contributor Version"
|
|
||||||
|
|
||||||
means the combination of the Contributions of others (if any) used by a
|
|
||||||
Contributor and that particular Contributor's Contribution.
|
|
||||||
|
|
||||||
1.3. "Contribution"
|
|
||||||
|
|
||||||
means Covered Software of a particular Contributor.
|
|
||||||
|
|
||||||
1.4. "Covered Software"
|
|
||||||
|
|
||||||
means Source Code Form to which the initial Contributor has attached the
|
|
||||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
|
||||||
Modifications of such Source Code Form, in each case including portions
|
|
||||||
thereof.
|
|
||||||
|
|
||||||
1.5. "Incompatible With Secondary Licenses"
|
|
||||||
means
|
|
||||||
|
|
||||||
a. that the initial Contributor has attached the notice described in
|
|
||||||
Exhibit B to the Covered Software; or
|
|
||||||
|
|
||||||
b. that the Covered Software was made available under the terms of
|
|
||||||
version 1.1 or earlier of the License, but not also under the terms of
|
|
||||||
a Secondary License.
|
|
||||||
|
|
||||||
1.6. "Executable Form"
|
|
||||||
|
|
||||||
means any form of the work other than Source Code Form.
|
|
||||||
|
|
||||||
1.7. "Larger Work"
|
|
||||||
|
|
||||||
means a work that combines Covered Software with other material, in a
|
|
||||||
separate file or files, that is not Covered Software.
|
|
||||||
|
|
||||||
1.8. "License"
|
|
||||||
|
|
||||||
means this document.
|
|
||||||
|
|
||||||
1.9. "Licensable"
|
|
||||||
|
|
||||||
means having the right to grant, to the maximum extent possible, whether
|
|
||||||
at the time of the initial grant or subsequently, any and all of the
|
|
||||||
rights conveyed by this License.
|
|
||||||
|
|
||||||
1.10. "Modifications"
|
|
||||||
|
|
||||||
means any of the following:
|
|
||||||
|
|
||||||
a. any file in Source Code Form that results from an addition to,
|
|
||||||
deletion from, or modification of the contents of Covered Software; or
|
|
||||||
|
|
||||||
b. any new file in Source Code Form that contains any Covered Software.
|
|
||||||
|
|
||||||
1.11. "Patent Claims" of a Contributor
|
|
||||||
|
|
||||||
means any patent claim(s), including without limitation, method,
|
|
||||||
process, and apparatus claims, in any patent Licensable by such
|
|
||||||
Contributor that would be infringed, but for the grant of the License,
|
|
||||||
by the making, using, selling, offering for sale, having made, import,
|
|
||||||
or transfer of either its Contributions or its Contributor Version.
|
|
||||||
|
|
||||||
1.12. "Secondary License"
|
|
||||||
|
|
||||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
|
||||||
General Public License, Version 2.1, the GNU Affero General Public
|
|
||||||
License, Version 3.0, or any later versions of those licenses.
|
|
||||||
|
|
||||||
1.13. "Source Code Form"
|
|
||||||
|
|
||||||
means the form of the work preferred for making modifications.
|
|
||||||
|
|
||||||
1.14. "You" (or "Your")
|
|
||||||
|
|
||||||
means an individual or a legal entity exercising rights under this
|
|
||||||
License. For legal entities, "You" includes any entity that controls, is
|
|
||||||
controlled by, or is under common control with You. For purposes of this
|
|
||||||
definition, "control" means (a) the power, direct or indirect, to cause
|
|
||||||
the direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
|
||||||
outstanding shares or beneficial ownership of such entity.
|
|
||||||
|
|
||||||
|
|
||||||
2. License Grants and Conditions
|
|
||||||
|
|
||||||
2.1. Grants
|
|
||||||
|
|
||||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
|
||||||
non-exclusive license:
|
|
||||||
|
|
||||||
a. under intellectual property rights (other than patent or trademark)
|
|
||||||
Licensable by such Contributor to use, reproduce, make available,
|
|
||||||
modify, display, perform, distribute, and otherwise exploit its
|
|
||||||
Contributions, either on an unmodified basis, with Modifications, or
|
|
||||||
as part of a Larger Work; and
|
|
||||||
|
|
||||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
|
||||||
sale, have made, import, and otherwise transfer either its
|
|
||||||
Contributions or its Contributor Version.
|
|
||||||
|
|
||||||
2.2. Effective Date
|
|
||||||
|
|
||||||
The licenses granted in Section 2.1 with respect to any Contribution
|
|
||||||
become effective for each Contribution on the date the Contributor first
|
|
||||||
distributes such Contribution.
|
|
||||||
|
|
||||||
2.3. Limitations on Grant Scope
|
|
||||||
|
|
||||||
The licenses granted in this Section 2 are the only rights granted under
|
|
||||||
this License. No additional rights or licenses will be implied from the
|
|
||||||
distribution or licensing of Covered Software under this License.
|
|
||||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
|
||||||
Contributor:
|
|
||||||
|
|
||||||
a. for any code that a Contributor has removed from Covered Software; or
|
|
||||||
|
|
||||||
b. for infringements caused by: (i) Your and any other third party's
|
|
||||||
modifications of Covered Software, or (ii) the combination of its
|
|
||||||
Contributions with other software (except as part of its Contributor
|
|
||||||
Version); or
|
|
||||||
|
|
||||||
c. under Patent Claims infringed by Covered Software in the absence of
|
|
||||||
its Contributions.
|
|
||||||
|
|
||||||
This License does not grant any rights in the trademarks, service marks,
|
|
||||||
or logos of any Contributor (except as may be necessary to comply with
|
|
||||||
the notice requirements in Section 3.4).
|
|
||||||
|
|
||||||
2.4. Subsequent Licenses
|
|
||||||
|
|
||||||
No Contributor makes additional grants as a result of Your choice to
|
|
||||||
distribute the Covered Software under a subsequent version of this
|
|
||||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
|
||||||
permitted under the terms of Section 3.3).
|
|
||||||
|
|
||||||
2.5. Representation
|
|
||||||
|
|
||||||
Each Contributor represents that the Contributor believes its
|
|
||||||
Contributions are its original creation(s) or it has sufficient rights to
|
|
||||||
grant the rights to its Contributions conveyed by this License.
|
|
||||||
|
|
||||||
2.6. Fair Use
|
|
||||||
|
|
||||||
This License is not intended to limit any rights You have under
|
|
||||||
applicable copyright doctrines of fair use, fair dealing, or other
|
|
||||||
equivalents.
|
|
||||||
|
|
||||||
2.7. Conditions
|
|
||||||
|
|
||||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
|
||||||
Section 2.1.
|
|
||||||
|
|
||||||
|
|
||||||
3. Responsibilities
|
|
||||||
|
|
||||||
3.1. Distribution of Source Form
|
|
||||||
|
|
||||||
All distribution of Covered Software in Source Code Form, including any
|
|
||||||
Modifications that You create or to which You contribute, must be under
|
|
||||||
the terms of this License. You must inform recipients that the Source
|
|
||||||
Code Form of the Covered Software is governed by the terms of this
|
|
||||||
License, and how they can obtain a copy of this License. You may not
|
|
||||||
attempt to alter or restrict the recipients' rights in the Source Code
|
|
||||||
Form.
|
|
||||||
|
|
||||||
3.2. Distribution of Executable Form
|
|
||||||
|
|
||||||
If You distribute Covered Software in Executable Form then:
|
|
||||||
|
|
||||||
a. such Covered Software must also be made available in Source Code Form,
|
|
||||||
as described in Section 3.1, and You must inform recipients of the
|
|
||||||
Executable Form how they can obtain a copy of such Source Code Form by
|
|
||||||
reasonable means in a timely manner, at a charge no more than the cost
|
|
||||||
of distribution to the recipient; and
|
|
||||||
|
|
||||||
b. You may distribute such Executable Form under the terms of this
|
|
||||||
License, or sublicense it under different terms, provided that the
|
|
||||||
license for the Executable Form does not attempt to limit or alter the
|
|
||||||
recipients' rights in the Source Code Form under this License.
|
|
||||||
|
|
||||||
3.3. Distribution of a Larger Work
|
|
||||||
|
|
||||||
You may create and distribute a Larger Work under terms of Your choice,
|
|
||||||
provided that You also comply with the requirements of this License for
|
|
||||||
the Covered Software. If the Larger Work is a combination of Covered
|
|
||||||
Software with a work governed by one or more Secondary Licenses, and the
|
|
||||||
Covered Software is not Incompatible With Secondary Licenses, this
|
|
||||||
License permits You to additionally distribute such Covered Software
|
|
||||||
under the terms of such Secondary License(s), so that the recipient of
|
|
||||||
the Larger Work may, at their option, further distribute the Covered
|
|
||||||
Software under the terms of either this License or such Secondary
|
|
||||||
License(s).
|
|
||||||
|
|
||||||
3.4. Notices
|
|
||||||
|
|
||||||
You may not remove or alter the substance of any license notices
|
|
||||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
|
||||||
limitations of liability) contained within the Source Code Form of the
|
|
||||||
Covered Software, except that You may alter any license notices to the
|
|
||||||
extent required to remedy known factual inaccuracies.
|
|
||||||
|
|
||||||
3.5. Application of Additional Terms
|
|
||||||
|
|
||||||
You may choose to offer, and to charge a fee for, warranty, support,
|
|
||||||
indemnity or liability obligations to one or more recipients of Covered
|
|
||||||
Software. However, You may do so only on Your own behalf, and not on
|
|
||||||
behalf of any Contributor. You must make it absolutely clear that any
|
|
||||||
such warranty, support, indemnity, or liability obligation is offered by
|
|
||||||
You alone, and You hereby agree to indemnify every Contributor for any
|
|
||||||
liability incurred by such Contributor as a result of warranty, support,
|
|
||||||
indemnity or liability terms You offer. You may include additional
|
|
||||||
disclaimers of warranty and limitations of liability specific to any
|
|
||||||
jurisdiction.
|
|
||||||
|
|
||||||
4. Inability to Comply Due to Statute or Regulation
|
|
||||||
|
|
||||||
If it is impossible for You to comply with any of the terms of this License
|
|
||||||
with respect to some or all of the Covered Software due to statute,
|
|
||||||
judicial order, or regulation then You must: (a) comply with the terms of
|
|
||||||
this License to the maximum extent possible; and (b) describe the
|
|
||||||
limitations and the code they affect. Such description must be placed in a
|
|
||||||
text file included with all distributions of the Covered Software under
|
|
||||||
this License. Except to the extent prohibited by statute or regulation,
|
|
||||||
such description must be sufficiently detailed for a recipient of ordinary
|
|
||||||
skill to be able to understand it.
|
|
||||||
|
|
||||||
5. Termination
|
|
||||||
|
|
||||||
5.1. The rights granted under this License will terminate automatically if You
|
|
||||||
fail to comply with any of its terms. However, if You become compliant,
|
|
||||||
then the rights granted under this License from a particular Contributor
|
|
||||||
are reinstated (a) provisionally, unless and until such Contributor
|
|
||||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
|
||||||
basis, if such Contributor fails to notify You of the non-compliance by
|
|
||||||
some reasonable means prior to 60 days after You have come back into
|
|
||||||
compliance. Moreover, Your grants from a particular Contributor are
|
|
||||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
|
||||||
non-compliance by some reasonable means, this is the first time You have
|
|
||||||
received notice of non-compliance with this License from such
|
|
||||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
|
||||||
of the notice.
|
|
||||||
|
|
||||||
5.2. If You initiate litigation against any entity by asserting a patent
|
|
||||||
infringement claim (excluding declaratory judgment actions,
|
|
||||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
|
||||||
directly or indirectly infringes any patent, then the rights granted to
|
|
||||||
You by any and all Contributors for the Covered Software under Section
|
|
||||||
2.1 of this License shall terminate.
|
|
||||||
|
|
||||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
|
||||||
license agreements (excluding distributors and resellers) which have been
|
|
||||||
validly granted by You or Your distributors under this License prior to
|
|
||||||
termination shall survive termination.
|
|
||||||
|
|
||||||
6. Disclaimer of Warranty
|
|
||||||
|
|
||||||
Covered Software is provided under this License on an "as is" basis,
|
|
||||||
without warranty of any kind, either expressed, implied, or statutory,
|
|
||||||
including, without limitation, warranties that the Covered Software is free
|
|
||||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
|
||||||
The entire risk as to the quality and performance of the Covered Software
|
|
||||||
is with You. Should any Covered Software prove defective in any respect,
|
|
||||||
You (not any Contributor) assume the cost of any necessary servicing,
|
|
||||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
|
||||||
part of this License. No use of any Covered Software is authorized under
|
|
||||||
this License except under this disclaimer.
|
|
||||||
|
|
||||||
7. Limitation of Liability
|
|
||||||
|
|
||||||
Under no circumstances and under no legal theory, whether tort (including
|
|
||||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
|
||||||
distributes Covered Software as permitted above, be liable to You for any
|
|
||||||
direct, indirect, special, incidental, or consequential damages of any
|
|
||||||
character including, without limitation, damages for lost profits, loss of
|
|
||||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses, even if such party shall have been
|
|
||||||
informed of the possibility of such damages. This limitation of liability
|
|
||||||
shall not apply to liability for death or personal injury resulting from
|
|
||||||
such party's negligence to the extent applicable law prohibits such
|
|
||||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
|
||||||
incidental or consequential damages, so this exclusion and limitation may
|
|
||||||
not apply to You.
|
|
||||||
|
|
||||||
8. Litigation
|
|
||||||
|
|
||||||
Any litigation relating to this License may be brought only in the courts
|
|
||||||
of a jurisdiction where the defendant maintains its principal place of
|
|
||||||
business and such litigation shall be governed by laws of that
|
|
||||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
|
||||||
in this Section shall prevent a party's ability to bring cross-claims or
|
|
||||||
counter-claims.
|
|
||||||
|
|
||||||
9. Miscellaneous
|
|
||||||
|
|
||||||
This License represents the complete agreement concerning the subject
|
|
||||||
matter hereof. If any provision of this License is held to be
|
|
||||||
unenforceable, such provision shall be reformed only to the extent
|
|
||||||
necessary to make it enforceable. Any law or regulation which provides that
|
|
||||||
the language of a contract shall be construed against the drafter shall not
|
|
||||||
be used to construe this License against a Contributor.
|
|
||||||
|
|
||||||
|
|
||||||
10. Versions of the License
|
|
||||||
|
|
||||||
10.1. New Versions
|
|
||||||
|
|
||||||
Mozilla Foundation is the license steward. Except as provided in Section
|
|
||||||
10.3, no one other than the license steward has the right to modify or
|
|
||||||
publish new versions of this License. Each version will be given a
|
|
||||||
distinguishing version number.
|
|
||||||
|
|
||||||
10.2. Effect of New Versions
|
|
||||||
|
|
||||||
You may distribute the Covered Software under the terms of the version
|
|
||||||
of the License under which You originally received the Covered Software,
|
|
||||||
or under the terms of any subsequent version published by the license
|
|
||||||
steward.
|
|
||||||
|
|
||||||
10.3. Modified Versions
|
|
||||||
|
|
||||||
If you create software not governed by this License, and you want to
|
|
||||||
create a new license for such software, you may create and use a
|
|
||||||
modified version of this License if you rename the license and remove
|
|
||||||
any references to the name of the license steward (except to note that
|
|
||||||
such modified license differs from this License).
|
|
||||||
|
|
||||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
|
||||||
Licenses If You choose to distribute Source Code Form that is
|
|
||||||
Incompatible With Secondary Licenses under the terms of this version of
|
|
||||||
the License, the notice described in Exhibit B of this License must be
|
|
||||||
attached.
|
|
||||||
|
|
||||||
Exhibit A - Source Code Form License Notice
|
|
||||||
|
|
||||||
This Source Code Form is subject to the
|
|
||||||
terms of the Mozilla Public License, v.
|
|
||||||
2.0. If a copy of the MPL was not
|
|
||||||
distributed with this file, You can
|
|
||||||
obtain one at
|
|
||||||
http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
If it is not possible or desirable to put the notice in a particular file,
|
|
||||||
then You may include the notice in a location (such as a LICENSE file in a
|
|
||||||
relevant directory) where a recipient would be likely to look for such a
|
|
||||||
notice.
|
|
||||||
|
|
||||||
You may add additional accurate notices of copyright ownership.
|
|
||||||
|
|
||||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
|
||||||
|
|
||||||
This Source Code Form is "Incompatible
|
|
||||||
With Secondary Licenses", as defined by
|
|
||||||
the Mozilla Public License, v. 2.0.
|
|
25
vendor/github.com/anacrolix/dht/README.md
generated
vendored
25
vendor/github.com/anacrolix/dht/README.md
generated
vendored
@ -1,25 +0,0 @@
|
|||||||
# dht
|
|
||||||
|
|
||||||
[![CircleCI](https://circleci.com/gh/anacrolix/dht.svg?style=shield)](https://circleci.com/gh/anacrolix/dht)
|
|
||||||
[![GoDoc](https://godoc.org/github.com/anacrolix/dht?status.svg)](https://godoc.org/github.com/anacrolix/dht)
|
|
||||||
[![Join the chat at https://gitter.im/anacrolix/torrent](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/anacrolix/torrent?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
Install the library package with `go get github.com/anacrolix/dht`, or the provided cmds with `go get github.com/anacrolix/dht/cmd/...`.
|
|
||||||
|
|
||||||
## Commands
|
|
||||||
|
|
||||||
Here I'll describe what some of the provided commands in `./cmd` do.
|
|
||||||
|
|
||||||
Note that the [`godo`](https://github.com/anacrolix/godo) command which is invoked in the following examples builds and executes a Go import path, like `go run`. It's easier to use this convention than to spell out the install/invoke cycle for every single example.
|
|
||||||
|
|
||||||
### dht-ping
|
|
||||||
|
|
||||||
Pings DHT nodes with the given network addresses.
|
|
||||||
|
|
||||||
$ godo ./cmd/dht-ping router.bittorrent.com:6881 router.utorrent.com:6881
|
|
||||||
2015/04/01 17:21:23 main.go:33: dht server on [::]:60058
|
|
||||||
32f54e697351ff4aec29cdbaabf2fbe3467cc267 (router.bittorrent.com:6881): 648.218621ms
|
|
||||||
ebff36697351ff4aec29cdbaabf2fbe3467cc267 (router.utorrent.com:6881): 873.864706ms
|
|
||||||
2/2 responses (100.000000%)
|
|
45
vendor/github.com/anacrolix/dht/addr.go
generated
vendored
45
vendor/github.com/anacrolix/dht/addr.go
generated
vendored
@ -1,45 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Used internally to refer to node network addresses. String() is called a
|
|
||||||
// lot, and so can be optimized. Network() is not exposed, so that the
|
|
||||||
// interface does not satisfy net.Addr, as the underlying type must be passed
|
|
||||||
// to any OS-level function that take net.Addr.
|
|
||||||
type Addr interface {
|
|
||||||
UDPAddr() *net.UDPAddr
|
|
||||||
String() string
|
|
||||||
KRPC() krpc.NodeAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Speeds up some of the commonly called Addr methods.
|
|
||||||
type cachedAddr struct {
|
|
||||||
ua net.UDPAddr
|
|
||||||
s string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ca cachedAddr) String() string {
|
|
||||||
return ca.s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ca cachedAddr) UDPAddr() *net.UDPAddr {
|
|
||||||
return &ca.ua
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ca cachedAddr) KRPC() krpc.NodeAddr {
|
|
||||||
return krpc.NodeAddr{
|
|
||||||
IP: ca.ua.IP,
|
|
||||||
Port: ca.ua.Port,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAddr(ua *net.UDPAddr) Addr {
|
|
||||||
return cachedAddr{
|
|
||||||
ua: *ua,
|
|
||||||
s: ua.String(),
|
|
||||||
}
|
|
||||||
}
|
|
233
vendor/github.com/anacrolix/dht/announce.go
generated
vendored
233
vendor/github.com/anacrolix/dht/announce.go
generated
vendored
@ -1,233 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
// get_peers and announce_peers.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/anacrolix/sync"
|
|
||||||
"github.com/anacrolix/torrent/logonce"
|
|
||||||
"github.com/willf/bloom"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Maintains state for an ongoing Announce operation. An Announce is started
|
|
||||||
// by calling Server.Announce.
|
|
||||||
type Announce struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
Peers chan PeersValues
|
|
||||||
// Inner chan is set to nil when on close.
|
|
||||||
values chan PeersValues
|
|
||||||
stop chan struct{}
|
|
||||||
triedAddrs *bloom.BloomFilter
|
|
||||||
// True when contact with all starting addrs has been initiated. This
|
|
||||||
// prevents a race where the first transaction finishes before the rest
|
|
||||||
// have been opened, sees no other transactions are pending and ends the
|
|
||||||
// announce.
|
|
||||||
contactedStartAddrs bool
|
|
||||||
// How many transactions are still ongoing.
|
|
||||||
pending int
|
|
||||||
server *Server
|
|
||||||
infoHash int160
|
|
||||||
// Count of (probably) distinct addresses we've sent get_peers requests
|
|
||||||
// to.
|
|
||||||
numContacted int
|
|
||||||
// The torrent port that we're announcing.
|
|
||||||
announcePort int
|
|
||||||
// The torrent port should be determined by the receiver in case we're
|
|
||||||
// being NATed.
|
|
||||||
announcePortImplied bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the number of distinct remote addresses the announce has queried.
|
|
||||||
func (a *Announce) NumContacted() int {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
return a.numContacted
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBloomFilterForTraversal() *bloom.BloomFilter {
|
|
||||||
return bloom.NewWithEstimates(10000, 0.5)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is kind of the main thing you want to do with DHT. It traverses the
|
|
||||||
// graph toward nodes that store peers for the infohash, streaming them to the
|
|
||||||
// caller, and announcing the local node to each node if allowed and
|
|
||||||
// specified.
|
|
||||||
func (s *Server) Announce(infoHash [20]byte, port int, impliedPort bool) (*Announce, error) {
|
|
||||||
startAddrs, err := s.traversalStartingAddrs()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
disc := &Announce{
|
|
||||||
Peers: make(chan PeersValues, 100),
|
|
||||||
stop: make(chan struct{}),
|
|
||||||
values: make(chan PeersValues),
|
|
||||||
triedAddrs: newBloomFilterForTraversal(),
|
|
||||||
server: s,
|
|
||||||
infoHash: int160FromByteArray(infoHash),
|
|
||||||
announcePort: port,
|
|
||||||
announcePortImplied: impliedPort,
|
|
||||||
}
|
|
||||||
// Function ferries from values to Values until discovery is halted.
|
|
||||||
go func() {
|
|
||||||
defer close(disc.Peers)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case psv := <-disc.values:
|
|
||||||
select {
|
|
||||||
case disc.Peers <- psv:
|
|
||||||
case <-disc.stop:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-disc.stop:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
disc.mu.Lock()
|
|
||||||
defer disc.mu.Unlock()
|
|
||||||
for i, addr := range startAddrs {
|
|
||||||
if i != 0 {
|
|
||||||
disc.mu.Unlock()
|
|
||||||
time.Sleep(time.Millisecond)
|
|
||||||
disc.mu.Lock()
|
|
||||||
}
|
|
||||||
disc.contact(addr)
|
|
||||||
}
|
|
||||||
disc.contactedStartAddrs = true
|
|
||||||
// If we failed to contact any of the starting addrs, no transactions
|
|
||||||
// will complete triggering a check that there are no pending
|
|
||||||
// responses.
|
|
||||||
disc.maybeClose()
|
|
||||||
}()
|
|
||||||
return disc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validNodeAddr(addr Addr) bool {
|
|
||||||
ua := addr.UDPAddr()
|
|
||||||
if ua.Port == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ip4 := ua.IP.To4(); ip4 != nil && ip4[0] == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Merge this with maybeGetPeersFromAddr.
|
|
||||||
func (a *Announce) gotNodeAddr(addr Addr) {
|
|
||||||
if !validNodeAddr(addr) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if a.triedAddrs.Test([]byte(addr.String())) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if a.server.ipBlocked(addr.UDPAddr().IP) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.contact(addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Merge this with maybeGetPeersFromAddr.
|
|
||||||
func (a *Announce) contact(addr Addr) {
|
|
||||||
a.numContacted++
|
|
||||||
a.triedAddrs.Add([]byte(addr.String()))
|
|
||||||
a.pending++
|
|
||||||
go func() {
|
|
||||||
err := a.getPeers(addr)
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.mu.Lock()
|
|
||||||
a.transactionClosed()
|
|
||||||
a.mu.Unlock()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Announce) maybeClose() {
|
|
||||||
if a.contactedStartAddrs && a.pending == 0 {
|
|
||||||
a.close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Announce) transactionClosed() {
|
|
||||||
a.pending--
|
|
||||||
a.maybeClose()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Announce) responseNode(node krpc.NodeInfo) {
|
|
||||||
a.gotNodeAddr(NewAddr(node.Addr.UDP()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Announce to a peer, if appropriate.
|
|
||||||
func (a *Announce) maybeAnnouncePeer(to Addr, token string, peerId *krpc.ID) {
|
|
||||||
if !a.server.config.NoSecurity && (peerId == nil || !NodeIdSecure(*peerId, to.UDPAddr().IP)) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.server.mu.Lock()
|
|
||||||
defer a.server.mu.Unlock()
|
|
||||||
err := a.server.announcePeer(to, a.infoHash, a.announcePort, token, a.announcePortImplied, nil)
|
|
||||||
if err != nil {
|
|
||||||
logonce.Stderr.Printf("error announcing peer: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Announce) getPeers(addr Addr) error {
|
|
||||||
a.server.mu.Lock()
|
|
||||||
defer a.server.mu.Unlock()
|
|
||||||
return a.server.getPeers(addr, a.infoHash, func(m krpc.Msg, err error) {
|
|
||||||
// Register suggested nodes closer to the target info-hash.
|
|
||||||
if m.R != nil && m.SenderID() != nil {
|
|
||||||
expvars.Add("announce get_peers response nodes values", int64(len(m.R.Nodes)))
|
|
||||||
expvars.Add("announce get_peers response nodes6 values", int64(len(m.R.Nodes6)))
|
|
||||||
a.mu.Lock()
|
|
||||||
for _, n := range m.R.Nodes {
|
|
||||||
a.responseNode(n)
|
|
||||||
}
|
|
||||||
for _, n := range m.R.Nodes6 {
|
|
||||||
a.responseNode(n)
|
|
||||||
}
|
|
||||||
a.mu.Unlock()
|
|
||||||
select {
|
|
||||||
case a.values <- PeersValues{
|
|
||||||
Peers: m.R.Values,
|
|
||||||
NodeInfo: krpc.NodeInfo{
|
|
||||||
Addr: addr.KRPC(),
|
|
||||||
ID: *m.SenderID(),
|
|
||||||
},
|
|
||||||
}:
|
|
||||||
case <-a.stop:
|
|
||||||
}
|
|
||||||
a.maybeAnnouncePeer(addr, m.R.Token, m.SenderID())
|
|
||||||
}
|
|
||||||
a.mu.Lock()
|
|
||||||
a.transactionClosed()
|
|
||||||
a.mu.Unlock()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Corresponds to the "values" key in a get_peers KRPC response. A list of
|
|
||||||
// peers that a node has reported as being in the swarm for a queried info
|
|
||||||
// hash.
|
|
||||||
type PeersValues struct {
|
|
||||||
Peers []Peer // Peers given in get_peers response.
|
|
||||||
krpc.NodeInfo // The node that gave the response.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop the announce.
|
|
||||||
func (a *Announce) Close() {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
a.close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Announce) close() {
|
|
||||||
select {
|
|
||||||
case <-a.stop:
|
|
||||||
default:
|
|
||||||
close(a.stop)
|
|
||||||
}
|
|
||||||
}
|
|
37
vendor/github.com/anacrolix/dht/bucket.go
generated
vendored
37
vendor/github.com/anacrolix/dht/bucket.go
generated
vendored
@ -1,37 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
type bucket struct {
|
|
||||||
nodes map[*node]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bucket) Len() int {
|
|
||||||
return len(b.nodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bucket) EachNode(f func(*node) bool) bool {
|
|
||||||
for n := range b.nodes {
|
|
||||||
if !f(n) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bucket) AddNode(n *node, k int) {
|
|
||||||
if _, ok := b.nodes[n]; ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if b.nodes == nil {
|
|
||||||
b.nodes = make(map[*node]struct{}, k)
|
|
||||||
}
|
|
||||||
b.nodes[n] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *bucket) GetNode(addr Addr, id int160) *node {
|
|
||||||
for n := range b.nodes {
|
|
||||||
if n.hasAddrAndID(addr, id) {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
131
vendor/github.com/anacrolix/dht/dht.go
generated
vendored
131
vendor/github.com/anacrolix/dht/dht.go
generated
vendored
@ -1,131 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
crand "crypto/rand"
|
|
||||||
_ "crypto/sha1"
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"math/rand"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/anacrolix/missinggo"
|
|
||||||
"github.com/anacrolix/torrent/iplist"
|
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
func defaultQueryResendDelay() time.Duration {
|
|
||||||
return jitterDuration(5*time.Second, time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uniquely identifies a transaction to us.
|
|
||||||
type transactionKey struct {
|
|
||||||
RemoteAddr string // host:port
|
|
||||||
T string // The KRPC transaction ID.
|
|
||||||
}
|
|
||||||
|
|
||||||
type StartingNodesGetter func() ([]Addr, error)
|
|
||||||
|
|
||||||
// ServerConfig allows to set up a configuration of the `Server` instance
|
|
||||||
// to be created with NewServer
|
|
||||||
type ServerConfig struct {
|
|
||||||
// Set NodeId Manually. Caller must ensure that if NodeId does not conform
|
|
||||||
// to DHT Security Extensions, that NoSecurity is also set.
|
|
||||||
NodeId [20]byte
|
|
||||||
Conn net.PacketConn
|
|
||||||
// Don't respond to queries from other nodes.
|
|
||||||
Passive bool
|
|
||||||
StartingNodes StartingNodesGetter
|
|
||||||
// Disable the DHT security extension:
|
|
||||||
// http://www.libtorrent.org/dht_sec.html.
|
|
||||||
NoSecurity bool
|
|
||||||
// Initial IP blocklist to use. Applied before serving and bootstrapping
|
|
||||||
// begins.
|
|
||||||
IPBlocklist iplist.Ranger
|
|
||||||
// Used to secure the server's ID. Defaults to the Conn's LocalAddr(). Set
|
|
||||||
// to the IP that remote nodes will see, as that IP is what they'll use to
|
|
||||||
// validate our ID.
|
|
||||||
PublicIP net.IP
|
|
||||||
|
|
||||||
// Hook received queries. Return false if you don't want to propagate to
|
|
||||||
// the default handlers.
|
|
||||||
OnQuery func(query *krpc.Msg, source net.Addr) (propagate bool)
|
|
||||||
// Called when a peer successfully announces to us.
|
|
||||||
OnAnnouncePeer func(infoHash metainfo.Hash, peer Peer)
|
|
||||||
// How long to wait before resending queries that haven't received a
|
|
||||||
// response. Defaults to a random value between 4.5 and 5.5s.
|
|
||||||
QueryResendDelay func() time.Duration
|
|
||||||
// TODO: Expose Peers, to return NodeInfo for received get_peers queries.
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerStats instance is returned by Server.Stats() and stores Server metrics
|
|
||||||
type ServerStats struct {
|
|
||||||
// Count of nodes in the node table that responded to our last query or
|
|
||||||
// haven't yet been queried.
|
|
||||||
GoodNodes int
|
|
||||||
// Count of nodes in the node table.
|
|
||||||
Nodes int
|
|
||||||
// Transactions awaiting a response.
|
|
||||||
OutstandingTransactions int
|
|
||||||
// Individual announce_peer requests that got a success response.
|
|
||||||
SuccessfulOutboundAnnouncePeerQueries int64
|
|
||||||
// Nodes that have been blocked.
|
|
||||||
BadNodes uint
|
|
||||||
OutboundQueriesAttempted int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func jitterDuration(average time.Duration, plusMinus time.Duration) time.Duration {
|
|
||||||
return average - plusMinus/2 + time.Duration(rand.Int63n(int64(plusMinus)))
|
|
||||||
}
|
|
||||||
|
|
||||||
type Peer = krpc.NodeAddr
|
|
||||||
|
|
||||||
func GlobalBootstrapAddrs() (addrs []Addr, err error) {
|
|
||||||
for _, s := range []string{
|
|
||||||
"router.utorrent.com:6881",
|
|
||||||
"router.bittorrent.com:6881",
|
|
||||||
"dht.transmissionbt.com:6881",
|
|
||||||
"dht.aelitis.com:6881", // Vuze
|
|
||||||
"router.silotis.us:6881", // IPv6
|
|
||||||
"dht.libtorrent.org:25401", // @arvidn's
|
|
||||||
|
|
||||||
} {
|
|
||||||
host, port, err := net.SplitHostPort(s)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
hostAddrs, err := net.LookupHost(host)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error looking up %q: %v", s, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, a := range hostAddrs {
|
|
||||||
ua, err := net.ResolveUDPAddr("udp", net.JoinHostPort(a, port))
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error resolving %q: %v", a, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
addrs = append(addrs, NewAddr(ua))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(addrs) == 0 {
|
|
||||||
err = errors.New("nothing resolved")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func RandomNodeID() (id [20]byte) {
|
|
||||||
crand.Read(id[:])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func MakeDeterministicNodeID(public net.Addr) (id [20]byte) {
|
|
||||||
h := crypto.SHA1.New()
|
|
||||||
h.Write([]byte(public.String()))
|
|
||||||
h.Sum(id[:0:20])
|
|
||||||
SecureNodeId(&id, missinggo.AddrIP(public))
|
|
||||||
return
|
|
||||||
}
|
|
22
vendor/github.com/anacrolix/dht/doc.go
generated
vendored
22
vendor/github.com/anacrolix/dht/doc.go
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
// Package dht implements a Distributed Hash Table (DHT) part of
|
|
||||||
// the BitTorrent protocol,
|
|
||||||
// as specified by BEP 5: http://www.bittorrent.org/beps/bep_0005.html
|
|
||||||
//
|
|
||||||
// BitTorrent uses a "distributed hash table" (DHT)
|
|
||||||
// for storing peer contact information for "trackerless" torrents.
|
|
||||||
// In effect, each peer becomes a tracker.
|
|
||||||
// The protocol is based on Kademila DHT protocol and is implemented over UDP.
|
|
||||||
//
|
|
||||||
// Please note the terminology used to avoid confusion.
|
|
||||||
// A "peer" is a client/server listening on a TCP port that
|
|
||||||
// implements the BitTorrent protocol.
|
|
||||||
// A "node" is a client/server listening on a UDP port implementing
|
|
||||||
// the distributed hash table protocol.
|
|
||||||
// The DHT is composed of nodes and stores the location of peers.
|
|
||||||
// BitTorrent clients include a DHT node, which is used to contact other nodes
|
|
||||||
// in the DHT to get the location of peers to
|
|
||||||
// download from using the BitTorrent protocol.
|
|
||||||
//
|
|
||||||
// Standard use involves creating a Server, and calling Announce on it with
|
|
||||||
// the details of your local torrent client and infohash of interest.
|
|
||||||
package dht
|
|
19
vendor/github.com/anacrolix/dht/expvar.go
generated
vendored
19
vendor/github.com/anacrolix/dht/expvar.go
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"expvar"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
read = expvar.NewInt("dhtRead")
|
|
||||||
readZeroPort = expvar.NewInt("dhtReadZeroPort")
|
|
||||||
readBlocked = expvar.NewInt("dhtReadBlocked")
|
|
||||||
readNotKRPCDict = expvar.NewInt("dhtReadNotKRPCDict")
|
|
||||||
readUnmarshalError = expvar.NewInt("dhtReadUnmarshalError")
|
|
||||||
readQuery = expvar.NewInt("dhtReadQuery")
|
|
||||||
readAnnouncePeer = expvar.NewInt("dhtReadAnnouncePeer")
|
|
||||||
announceErrors = expvar.NewInt("dhtAnnounceErrors")
|
|
||||||
writeErrors = expvar.NewInt("dhtWriteErrors")
|
|
||||||
writes = expvar.NewInt("dhtWrites")
|
|
||||||
expvars = expvar.NewMap("dht")
|
|
||||||
)
|
|
16
vendor/github.com/anacrolix/dht/go.mod
generated
vendored
16
vendor/github.com/anacrolix/dht/go.mod
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
module github.com/anacrolix/dht
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180621131740-7fc7cfea16ea
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c
|
|
||||||
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2
|
|
||||||
github.com/davecgh/go-spew v1.1.0
|
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
|
|
||||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712
|
|
||||||
github.com/pmezard/go-difflib v1.0.0
|
|
||||||
github.com/stretchr/testify v1.2.1
|
|
||||||
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16
|
|
||||||
)
|
|
56
vendor/github.com/anacrolix/dht/go.sum
generated
vendored
56
vendor/github.com/anacrolix/dht/go.sum
generated
vendored
@ -1,56 +0,0 @@
|
|||||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
|
||||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
|
||||||
github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI=
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa h1:xCaATLKmn39QqLs3tUZYr6eKvezJV+FYvVOLTklxK6U=
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
|
||||||
github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E=
|
|
||||||
github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180621131740-7fc7cfea16ea h1:zgemcMeWIWXhwvxFSqVZzW695Q0erYNkfM1X3lZf/8w=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180621131740-7fc7cfea16ea/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
|
|
||||||
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01 h1:14t4kCoWXaUXrHErRD0bLMNolOE50nyPA0gO8+J3hP8=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c h1:kBea2eTysvA8qHScuAufErjwV6/wwqoOe03h7W9d1h0=
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
|
||||||
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb h1:XTz2SGqfyyk/fvDYPZe3VaZqAH4heDTMz5WQmBm6F5w=
|
|
||||||
github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs=
|
|
||||||
github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
|
|
||||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2 h1:1B/+1BcRhOMG1KH/YhNIU8OppSWk5d/NGyfRla88CuY=
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
|
||||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
|
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
|
||||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e h1:Fw7ZmgiklsLh5EQWyHh1sumKSCG1+yjEctIpGKib87s=
|
|
||||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
|
||||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
|
|
||||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
|
||||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
|
|
||||||
github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
|
|
||||||
github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk=
|
|
||||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
|
||||||
github.com/mattn/go-sqlite3 v1.7.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
|
||||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
|
||||||
github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U=
|
|
||||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
|
|
||||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
|
||||||
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
|
|
||||||
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
|
||||||
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16 h1:hDGatoumfYOxzIZGsSylJuxTzu9k86BJl8OQhm72anI=
|
|
||||||
github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
|
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
87
vendor/github.com/anacrolix/dht/int160.go
generated
vendored
87
vendor/github.com/anacrolix/dht/int160.go
generated
vendored
@ -1,87 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
type int160 struct {
|
|
||||||
bits [20]uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) AsByteArray() [20]byte {
|
|
||||||
return me.bits
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) ByteString() string {
|
|
||||||
return string(me.bits[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) BitLen() int {
|
|
||||||
var a big.Int
|
|
||||||
a.SetBytes(me.bits[:])
|
|
||||||
return a.BitLen()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) SetBytes(b []byte) {
|
|
||||||
n := copy(me.bits[:], b)
|
|
||||||
if n != 20 {
|
|
||||||
panic(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me int160) Bytes() []byte {
|
|
||||||
return me.bits[:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l int160) Cmp(r int160) int {
|
|
||||||
for i := range l.bits {
|
|
||||||
if l.bits[i] < r.bits[i] {
|
|
||||||
return -1
|
|
||||||
} else if l.bits[i] > r.bits[i] {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) SetMax() {
|
|
||||||
for i := range me.bits {
|
|
||||||
me.bits[i] = math.MaxUint8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) Xor(a, b *int160) {
|
|
||||||
for i := range me.bits {
|
|
||||||
me.bits[i] = a.bits[i] ^ b.bits[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *int160) IsZero() bool {
|
|
||||||
for _, b := range me.bits {
|
|
||||||
if b != 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func int160FromBytes(b []byte) (ret int160) {
|
|
||||||
ret.SetBytes(b)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func int160FromByteArray(b [20]byte) (ret int160) {
|
|
||||||
ret.SetBytes(b[:])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func int160FromByteString(s string) (ret int160) {
|
|
||||||
ret.SetBytes([]byte(s))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func distance(a, b *int160) (ret int160) {
|
|
||||||
ret.Xor(a, b)
|
|
||||||
return
|
|
||||||
}
|
|
25
vendor/github.com/anacrolix/dht/krpc/CompactIPv4NodeAddrs.go
generated
vendored
25
vendor/github.com/anacrolix/dht/krpc/CompactIPv4NodeAddrs.go
generated
vendored
@ -1,25 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
type CompactIPv4NodeAddrs []NodeAddr
|
|
||||||
|
|
||||||
func (CompactIPv4NodeAddrs) ElemSize() int { return 6 }
|
|
||||||
|
|
||||||
func (me CompactIPv4NodeAddrs) MarshalBinary() ([]byte, error) {
|
|
||||||
return marshalBinarySlice(me)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv4NodeAddrs) MarshalBencode() ([]byte, error) {
|
|
||||||
return bencodeBytesResult(me.MarshalBinary())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv4NodeAddrs) UnmarshalBinary(b []byte) error {
|
|
||||||
return unmarshalBinarySlice(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv4NodeAddrs) UnmarshalBencode(b []byte) error {
|
|
||||||
return unmarshalBencodedBinary(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv4NodeAddrs) NodeAddrs() []NodeAddr {
|
|
||||||
return me
|
|
||||||
}
|
|
37
vendor/github.com/anacrolix/dht/krpc/CompactIPv4NodeInfo.go
generated
vendored
37
vendor/github.com/anacrolix/dht/krpc/CompactIPv4NodeInfo.go
generated
vendored
@ -1,37 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import "github.com/anacrolix/missinggo/slices"
|
|
||||||
|
|
||||||
type (
|
|
||||||
CompactIPv4NodeInfo []NodeInfo
|
|
||||||
)
|
|
||||||
|
|
||||||
func (CompactIPv4NodeInfo) ElemSize() int {
|
|
||||||
return 26
|
|
||||||
}
|
|
||||||
|
|
||||||
// func (me *CompactIPv4NodeInfo) Scrub() {
|
|
||||||
// slices.FilterInPlace(me, func(ni *NodeInfo) bool {
|
|
||||||
// ni.Addr.IP = ni.Addr.IP.To4()
|
|
||||||
// return ni.Addr.IP != nil
|
|
||||||
// })
|
|
||||||
// }
|
|
||||||
|
|
||||||
func (me CompactIPv4NodeInfo) MarshalBinary() ([]byte, error) {
|
|
||||||
return marshalBinarySlice(slices.Map(func(ni NodeInfo) NodeInfo {
|
|
||||||
ni.Addr.IP = ni.Addr.IP.To4()
|
|
||||||
return ni
|
|
||||||
}, me).(CompactIPv4NodeInfo))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv4NodeInfo) MarshalBencode() ([]byte, error) {
|
|
||||||
return bencodeBytesResult(me.MarshalBinary())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv4NodeInfo) UnmarshalBinary(b []byte) error {
|
|
||||||
return unmarshalBinarySlice(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv4NodeInfo) UnmarshalBencode(b []byte) error {
|
|
||||||
return unmarshalBencodedBinary(me, b)
|
|
||||||
}
|
|
30
vendor/github.com/anacrolix/dht/krpc/CompactIPv6NodeAddrs.go
generated
vendored
30
vendor/github.com/anacrolix/dht/krpc/CompactIPv6NodeAddrs.go
generated
vendored
@ -1,30 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import "github.com/anacrolix/missinggo/slices"
|
|
||||||
|
|
||||||
type CompactIPv6NodeAddrs []NodeAddr
|
|
||||||
|
|
||||||
func (CompactIPv6NodeAddrs) ElemSize() int { return 18 }
|
|
||||||
|
|
||||||
func (me CompactIPv6NodeAddrs) MarshalBinary() ([]byte, error) {
|
|
||||||
return marshalBinarySlice(slices.Map(func(na NodeAddr) NodeAddr {
|
|
||||||
na.IP = na.IP.To16()
|
|
||||||
return na
|
|
||||||
}, me).(CompactIPv6NodeAddrs))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv6NodeAddrs) MarshalBencode() ([]byte, error) {
|
|
||||||
return bencodeBytesResult(me.MarshalBinary())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv6NodeAddrs) UnmarshalBinary(b []byte) error {
|
|
||||||
return unmarshalBinarySlice(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv6NodeAddrs) UnmarshalBencode(b []byte) error {
|
|
||||||
return unmarshalBencodedBinary(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv6NodeAddrs) NodeAddrs() []NodeAddr {
|
|
||||||
return me
|
|
||||||
}
|
|
32
vendor/github.com/anacrolix/dht/krpc/CompactIPv6NodeInfo.go
generated
vendored
32
vendor/github.com/anacrolix/dht/krpc/CompactIPv6NodeInfo.go
generated
vendored
@ -1,32 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/anacrolix/missinggo/slices"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
CompactIPv6NodeInfo []NodeInfo
|
|
||||||
)
|
|
||||||
|
|
||||||
func (CompactIPv6NodeInfo) ElemSize() int {
|
|
||||||
return 38
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv6NodeInfo) MarshalBinary() ([]byte, error) {
|
|
||||||
return marshalBinarySlice(slices.Map(func(ni NodeInfo) NodeInfo {
|
|
||||||
ni.Addr.IP = ni.Addr.IP.To16()
|
|
||||||
return ni
|
|
||||||
}, me).(CompactIPv6NodeInfo))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me CompactIPv6NodeInfo) MarshalBencode() ([]byte, error) {
|
|
||||||
return bencodeBytesResult(me.MarshalBinary())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv6NodeInfo) UnmarshalBinary(b []byte) error {
|
|
||||||
return unmarshalBinarySlice(me, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *CompactIPv6NodeInfo) UnmarshalBencode(b []byte) error {
|
|
||||||
return unmarshalBencodedBinary(me, b)
|
|
||||||
}
|
|
67
vendor/github.com/anacrolix/dht/krpc/compact_helpers.go
generated
vendored
67
vendor/github.com/anacrolix/dht/krpc/compact_helpers.go
generated
vendored
@ -1,67 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/anacrolix/missinggo/slices"
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
|
||||||
)
|
|
||||||
|
|
||||||
func unmarshalBencodedBinary(u encoding.BinaryUnmarshaler, b []byte) (err error) {
|
|
||||||
var ub string
|
|
||||||
err = bencode.Unmarshal(b, &ub)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return u.UnmarshalBinary([]byte(ub))
|
|
||||||
}
|
|
||||||
|
|
||||||
type elemSizer interface {
|
|
||||||
ElemSize() int
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalBinarySlice(slice elemSizer, b []byte) (err error) {
|
|
||||||
sliceValue := reflect.ValueOf(slice).Elem()
|
|
||||||
elemType := sliceValue.Type().Elem()
|
|
||||||
bytesPerElem := slice.ElemSize()
|
|
||||||
for len(b) != 0 {
|
|
||||||
if len(b) < bytesPerElem {
|
|
||||||
err = fmt.Errorf("%d trailing bytes < %d required for element", len(b), bytesPerElem)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
elem := reflect.New(elemType)
|
|
||||||
err = elem.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b[:bytesPerElem])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sliceValue.Set(reflect.Append(sliceValue, elem.Elem()))
|
|
||||||
b = b[bytesPerElem:]
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func marshalBinarySlice(slice elemSizer) (ret []byte, err error) {
|
|
||||||
var elems []encoding.BinaryMarshaler
|
|
||||||
slices.MakeInto(&elems, slice)
|
|
||||||
for _, e := range elems {
|
|
||||||
var b []byte
|
|
||||||
b, err = e.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(b) != slice.ElemSize() {
|
|
||||||
panic(fmt.Sprintf("marshalled %d bytes, but expected %d", len(b), slice.ElemSize()))
|
|
||||||
}
|
|
||||||
ret = append(ret, b...)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func bencodeBytesResult(b []byte, err error) ([]byte, error) {
|
|
||||||
if err != nil {
|
|
||||||
return b, err
|
|
||||||
}
|
|
||||||
return bencode.Marshal(b)
|
|
||||||
}
|
|
59
vendor/github.com/anacrolix/dht/krpc/error.go
generated
vendored
59
vendor/github.com/anacrolix/dht/krpc/error.go
generated
vendored
@ -1,59 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
|
||||||
)
|
|
||||||
|
|
||||||
var ErrorMethodUnknown = Error{
|
|
||||||
Code: 204,
|
|
||||||
Msg: "Method Unknown",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Represented as a string or list in bencode.
|
|
||||||
type Error struct {
|
|
||||||
Code int
|
|
||||||
Msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ bencode.Unmarshaler = (*Error)(nil)
|
|
||||||
_ bencode.Marshaler = (*Error)(nil)
|
|
||||||
_ error = Error{}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (e *Error) UnmarshalBencode(_b []byte) (err error) {
|
|
||||||
var _v interface{}
|
|
||||||
err = bencode.Unmarshal(_b, &_v)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch v := _v.(type) {
|
|
||||||
case []interface{}:
|
|
||||||
func() {
|
|
||||||
defer func() {
|
|
||||||
r := recover()
|
|
||||||
if r == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = fmt.Errorf("unpacking %#v: %s", v, r)
|
|
||||||
}()
|
|
||||||
e.Code = int(v[0].(int64))
|
|
||||||
e.Msg = v[1].(string)
|
|
||||||
}()
|
|
||||||
case string:
|
|
||||||
e.Msg = v
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf(`KRPC error bencode value has unexpected type: %T`, _v)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e Error) MarshalBencode() (ret []byte, err error) {
|
|
||||||
return bencode.Marshal([]interface{}{e.Code, e.Msg})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e Error) Error() string {
|
|
||||||
return fmt.Sprintf("KRPC error %d: %s", e.Code, e.Msg)
|
|
||||||
}
|
|
36
vendor/github.com/anacrolix/dht/krpc/id.go
generated
vendored
36
vendor/github.com/anacrolix/dht/krpc/id.go
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ID [20]byte
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
bencode.Marshaler
|
|
||||||
bencode.Unmarshaler
|
|
||||||
} = (*ID)(nil)
|
|
||||||
|
|
||||||
func IdFromString(s string) (id ID) {
|
|
||||||
if n := copy(id[:], s); n != 20 {
|
|
||||||
panic(n)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (id *ID) MarshalBencode() ([]byte, error) {
|
|
||||||
return []byte("20:" + string(id[:])), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (id *ID) UnmarshalBencode(b []byte) error {
|
|
||||||
var s string
|
|
||||||
if err := bencode.Unmarshal(b, &s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if n := copy(id[:], s); n != 20 {
|
|
||||||
return fmt.Errorf("string has wrong length: %d", n)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
85
vendor/github.com/anacrolix/dht/krpc/msg.go
generated
vendored
85
vendor/github.com/anacrolix/dht/krpc/msg.go
generated
vendored
@ -1,85 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Msg represents messages that nodes in the network send to each other as specified by the protocol.
|
|
||||||
// They are also refered to as the KRPC messages.
|
|
||||||
// There are three types of messages: QUERY, RESPONSE, ERROR
|
|
||||||
// The message is a dictonary that is then
|
|
||||||
// "bencoded" (serialization & compression format adopted by the BitTorrent)
|
|
||||||
// and sent via the UDP connection to peers.
|
|
||||||
//
|
|
||||||
// A KRPC message is a single dictionary with two keys common to every message and additional keys depending on the type of message.
|
|
||||||
// Every message has a key "t" with a string value representing a transaction ID.
|
|
||||||
// This transaction ID is generated by the querying node and is echoed in the response, so responses
|
|
||||||
// may be correlated with multiple queries to the same node. The transaction ID should be encoded as a short string of binary numbers, typically 2 characters are enough as they cover 2^16 outstanding queries. The other key contained in every KRPC message is "y" with a single character value describing the type of message. The value of the "y" key is one of "q" for query, "r" for response, or "e" for error.
|
|
||||||
// 3 message types: QUERY, RESPONSE, ERROR
|
|
||||||
type Msg struct {
|
|
||||||
Q string `bencode:"q,omitempty"` // Query method (one of 4: "ping", "find_node", "get_peers", "announce_peer")
|
|
||||||
A *MsgArgs `bencode:"a,omitempty"` // named arguments sent with a query
|
|
||||||
T string `bencode:"t"` // required: transaction ID
|
|
||||||
Y string `bencode:"y"` // required: type of the message: q for QUERY, r for RESPONSE, e for ERROR
|
|
||||||
R *Return `bencode:"r,omitempty"` // RESPONSE type only
|
|
||||||
E *Error `bencode:"e,omitempty"` // ERROR type only
|
|
||||||
IP NodeAddr `bencode:"ip,omitempty"`
|
|
||||||
ReadOnly bool `bencode:"ro,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type MsgArgs struct {
|
|
||||||
ID ID `bencode:"id"` // ID of the querying Node
|
|
||||||
InfoHash ID `bencode:"info_hash,omitempty"` // InfoHash of the torrent
|
|
||||||
Target ID `bencode:"target,omitempty"` // ID of the node sought
|
|
||||||
Token string `bencode:"token,omitempty"` // Token received from an earlier get_peers query
|
|
||||||
Port int `bencode:"port,omitempty"` // Senders torrent port
|
|
||||||
ImpliedPort bool `bencode:"implied_port,omitempty"` // Use senders apparent DHT port
|
|
||||||
Want []Want `bencode:"want,omitempty"` // Contains strings like "n4" and "n6" from BEP 32.
|
|
||||||
}
|
|
||||||
|
|
||||||
type Want string
|
|
||||||
|
|
||||||
const (
|
|
||||||
WantNodes Want = "n4"
|
|
||||||
WantNodes6 Want = "n6"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Return struct {
|
|
||||||
ID ID `bencode:"id"` // ID of the querying node
|
|
||||||
Nodes CompactIPv4NodeInfo `bencode:"nodes,omitempty"` // K closest nodes to the requested target
|
|
||||||
Nodes6 CompactIPv6NodeInfo `bencode:"nodes6,omitempty"` // K closest nodes to the requested target
|
|
||||||
Token string `bencode:"token,omitempty"` // Token for future announce_peer
|
|
||||||
Values []NodeAddr `bencode:"values,omitempty"` // Torrent peers
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fmt.Stringer = Msg{}
|
|
||||||
|
|
||||||
func (m Msg) String() string {
|
|
||||||
return fmt.Sprintf("%#v", m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The node ID of the source of this Msg. Returns nil if it isn't present.
|
|
||||||
// TODO: Can we verify Msgs more aggressively so this is guaranteed to return
|
|
||||||
// a valid ID for a checked Msg?
|
|
||||||
func (m Msg) SenderID() *ID {
|
|
||||||
switch m.Y {
|
|
||||||
case "q":
|
|
||||||
if m.A == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &m.A.ID
|
|
||||||
case "r":
|
|
||||||
if m.R == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &m.R.ID
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m Msg) Error() *Error {
|
|
||||||
if m.Y != "e" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return m.E
|
|
||||||
}
|
|
62
vendor/github.com/anacrolix/dht/krpc/nodeaddr.go
generated
vendored
62
vendor/github.com/anacrolix/dht/krpc/nodeaddr.go
generated
vendored
@ -1,62 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
|
||||||
)
|
|
||||||
|
|
||||||
type NodeAddr struct {
|
|
||||||
IP net.IP
|
|
||||||
Port int
|
|
||||||
}
|
|
||||||
|
|
||||||
// A zero Port is taken to mean no port provided, per BEP 7.
|
|
||||||
func (me NodeAddr) String() string {
|
|
||||||
if me.Port == 0 {
|
|
||||||
return me.IP.String()
|
|
||||||
}
|
|
||||||
return net.JoinHostPort(me.IP.String(), strconv.FormatInt(int64(me.Port), 10))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *NodeAddr) UnmarshalBinary(b []byte) error {
|
|
||||||
me.IP = make(net.IP, len(b)-2)
|
|
||||||
copy(me.IP, b[:len(b)-2])
|
|
||||||
me.Port = int(binary.BigEndian.Uint16(b[len(b)-2:]))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *NodeAddr) UnmarshalBencode(b []byte) (err error) {
|
|
||||||
var _b []byte
|
|
||||||
err = bencode.Unmarshal(b, &_b)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return me.UnmarshalBinary(_b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me NodeAddr) MarshalBinary() ([]byte, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
b.Write(me.IP)
|
|
||||||
binary.Write(&b, binary.BigEndian, uint16(me.Port))
|
|
||||||
return b.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me NodeAddr) MarshalBencode() ([]byte, error) {
|
|
||||||
return bencodeBytesResult(me.MarshalBinary())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me NodeAddr) UDP() *net.UDPAddr {
|
|
||||||
return &net.UDPAddr{
|
|
||||||
IP: me.IP,
|
|
||||||
Port: me.Port,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *NodeAddr) FromUDPAddr(ua *net.UDPAddr) {
|
|
||||||
me.IP = ua.IP
|
|
||||||
me.Port = ua.Port
|
|
||||||
}
|
|
46
vendor/github.com/anacrolix/dht/krpc/nodeinfo.go
generated
vendored
46
vendor/github.com/anacrolix/dht/krpc/nodeinfo.go
generated
vendored
@ -1,46 +0,0 @@
|
|||||||
package krpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
type NodeInfo struct {
|
|
||||||
ID [20]byte
|
|
||||||
Addr NodeAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me NodeInfo) String() string {
|
|
||||||
return fmt.Sprintf("{%x at %s}", me.ID, me.Addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func RandomNodeInfo(ipLen int) (ni NodeInfo) {
|
|
||||||
rand.Read(ni.ID[:])
|
|
||||||
ni.Addr.IP = make(net.IP, ipLen)
|
|
||||||
rand.Read(ni.Addr.IP)
|
|
||||||
ni.Addr.Port = rand.Intn(math.MaxUint16 + 1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
encoding.BinaryMarshaler
|
|
||||||
encoding.BinaryUnmarshaler
|
|
||||||
} = (*NodeInfo)(nil)
|
|
||||||
|
|
||||||
func (ni NodeInfo) MarshalBinary() ([]byte, error) {
|
|
||||||
var w bytes.Buffer
|
|
||||||
w.Write(ni.ID[:])
|
|
||||||
w.Write(ni.Addr.IP)
|
|
||||||
binary.Write(&w, binary.BigEndian, uint16(ni.Addr.Port))
|
|
||||||
return w.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ni *NodeInfo) UnmarshalBinary(b []byte) error {
|
|
||||||
copy(ni.ID[:], b)
|
|
||||||
return ni.Addr.UnmarshalBinary(b[20:])
|
|
||||||
}
|
|
20
vendor/github.com/anacrolix/dht/misc.go
generated
vendored
20
vendor/github.com/anacrolix/dht/misc.go
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
func mustListen(addr string) net.PacketConn {
|
|
||||||
ret, err := net.ListenPacket("udp", addr)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func addrResolver(addr string) func() ([]Addr, error) {
|
|
||||||
return func() ([]Addr, error) {
|
|
||||||
ua, err := net.ResolveUDPAddr("udp", addr)
|
|
||||||
return []Addr{NewAddr(ua)}, err
|
|
||||||
}
|
|
||||||
}
|
|
57
vendor/github.com/anacrolix/dht/node.go
generated
vendored
57
vendor/github.com/anacrolix/dht/node.go
generated
vendored
@ -1,57 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
type nodeKey struct {
|
|
||||||
addr Addr
|
|
||||||
id int160
|
|
||||||
}
|
|
||||||
|
|
||||||
type node struct {
|
|
||||||
nodeKey
|
|
||||||
announceToken string
|
|
||||||
readOnly bool
|
|
||||||
|
|
||||||
lastGotQuery time.Time
|
|
||||||
lastGotResponse time.Time
|
|
||||||
|
|
||||||
consecutiveFailures int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) hasAddrAndID(addr Addr, id int160) bool {
|
|
||||||
return id == n.id && n.addr.String() == addr.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) IsSecure() bool {
|
|
||||||
return NodeIdSecure(n.id.AsByteArray(), n.addr.UDPAddr().IP)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) idString() string {
|
|
||||||
return n.id.ByteString()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) NodeInfo() (ret krpc.NodeInfo) {
|
|
||||||
ret.Addr = n.addr.KRPC()
|
|
||||||
if n := copy(ret.ID[:], n.idString()); n != 20 {
|
|
||||||
panic(n)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Per the spec in BEP 5.
|
|
||||||
func (n *node) IsGood() bool {
|
|
||||||
if n.id.IsZero() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if time.Since(n.lastGotResponse) < 15*time.Minute {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if !n.lastGotResponse.IsZero() && time.Since(n.lastGotQuery) < 15*time.Minute {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
43
vendor/github.com/anacrolix/dht/nodes_file.go
generated
vendored
43
vendor/github.com/anacrolix/dht/nodes_file.go
generated
vendored
@ -1,43 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
func WriteNodesToFile(ns []krpc.NodeInfo, fileName string) (err error) {
|
|
||||||
b, err := krpc.CompactIPv6NodeInfo(ns).MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(fileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
closeErr := f.Close()
|
|
||||||
if err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
_, err = f.Write(b)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func ReadNodesFromFile(fileName string) (ns []krpc.NodeInfo, err error) {
|
|
||||||
f, err := os.Open(fileName)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
b, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var cnis krpc.CompactIPv6NodeInfo
|
|
||||||
err = cnis.UnmarshalBinary(b)
|
|
||||||
ns = cnis
|
|
||||||
return
|
|
||||||
}
|
|
102
vendor/github.com/anacrolix/dht/security.go
generated
vendored
102
vendor/github.com/anacrolix/dht/security.go
generated
vendored
@ -1,102 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"hash/crc32"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
func maskForIP(ip net.IP) []byte {
|
|
||||||
switch {
|
|
||||||
case ip.To4() != nil:
|
|
||||||
return []byte{0x03, 0x0f, 0x3f, 0xff}
|
|
||||||
default:
|
|
||||||
return []byte{0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate the CRC used to make or validate secure node ID.
|
|
||||||
func crcIP(ip net.IP, rand uint8) uint32 {
|
|
||||||
if ip4 := ip.To4(); ip4 != nil {
|
|
||||||
ip = ip4
|
|
||||||
}
|
|
||||||
// Copy IP so we can make changes. Go sux at this.
|
|
||||||
ip = append(make(net.IP, 0, len(ip)), ip...)
|
|
||||||
mask := maskForIP(ip)
|
|
||||||
for i := range mask {
|
|
||||||
ip[i] &= mask[i]
|
|
||||||
}
|
|
||||||
r := rand & 7
|
|
||||||
ip[0] |= r << 5
|
|
||||||
return crc32.Checksum(ip[:len(mask)], crc32.MakeTable(crc32.Castagnoli))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Makes a node ID secure, in-place. The ID is 20 raw bytes.
|
|
||||||
// http://www.libtorrent.org/dht_sec.html
|
|
||||||
func SecureNodeId(id *[20]byte, ip net.IP) {
|
|
||||||
crc := crcIP(ip, id[19])
|
|
||||||
id[0] = byte(crc >> 24 & 0xff)
|
|
||||||
id[1] = byte(crc >> 16 & 0xff)
|
|
||||||
id[2] = byte(crc>>8&0xf8) | id[2]&7
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns whether the node ID is considered secure. The id is the 20 raw
|
|
||||||
// bytes. http://www.libtorrent.org/dht_sec.html
|
|
||||||
func NodeIdSecure(id [20]byte, ip net.IP) bool {
|
|
||||||
if isLocalNetwork(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ip4 := ip.To4(); ip4 != nil {
|
|
||||||
ip = ip4
|
|
||||||
}
|
|
||||||
crc := crcIP(ip, id[19])
|
|
||||||
if id[0] != byte(crc>>24&0xff) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if id[1] != byte(crc>>16&0xff) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if id[2]&0xf8 != byte(crc>>8&0xf8) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
classA, classB, classC *net.IPNet
|
|
||||||
)
|
|
||||||
|
|
||||||
func mustParseCIDRIPNet(s string) *net.IPNet {
|
|
||||||
_, ret, err := net.ParseCIDR(s)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
classA = mustParseCIDRIPNet("10.0.0.0/8")
|
|
||||||
classB = mustParseCIDRIPNet("172.16.0.0/12")
|
|
||||||
classC = mustParseCIDRIPNet("192.168.0.0/16")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Per http://www.libtorrent.org/dht_sec.html#enforcement, the IP is
|
|
||||||
// considered a local network address and should be exempted from node ID
|
|
||||||
// verification.
|
|
||||||
func isLocalNetwork(ip net.IP) bool {
|
|
||||||
if classA.Contains(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if classB.Contains(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if classC.Contains(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ip.IsLinkLocalUnicast() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ip.IsLoopback() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
813
vendor/github.com/anacrolix/dht/server.go
generated
vendored
813
vendor/github.com/anacrolix/dht/server.go
generated
vendored
@ -1,813 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"text/tabwriter"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/anacrolix/missinggo"
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
|
||||||
"github.com/anacrolix/torrent/iplist"
|
|
||||||
"github.com/anacrolix/torrent/logonce"
|
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Server defines parameters for a DHT node server that is able to send
|
|
||||||
// queries, and respond to the ones from the network. Each node has a globally
|
|
||||||
// unique identifier known as the "node ID." Node IDs are chosen at random
|
|
||||||
// from the same 160-bit space as BitTorrent infohashes and define the
|
|
||||||
// behaviour of the node. Zero valued Server does not have a valid ID and thus
|
|
||||||
// is unable to function properly. Use `NewServer(nil)` to initialize a
|
|
||||||
// default node.
|
|
||||||
type Server struct {
|
|
||||||
id int160
|
|
||||||
socket net.PacketConn
|
|
||||||
|
|
||||||
mu sync.RWMutex
|
|
||||||
transactions map[transactionKey]*Transaction
|
|
||||||
nextT uint64 // unique "t" field for outbound queries
|
|
||||||
table table
|
|
||||||
closed missinggo.Event
|
|
||||||
ipBlockList iplist.Ranger
|
|
||||||
tokenServer tokenServer // Manages tokens we issue to our queriers.
|
|
||||||
config ServerConfig
|
|
||||||
stats ServerStats
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) numGoodNodes() (num int) {
|
|
||||||
s.table.forNodes(func(n *node) bool {
|
|
||||||
if n.IsGood() {
|
|
||||||
num++
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func prettySince(t time.Time) string {
|
|
||||||
if t.IsZero() {
|
|
||||||
return "never"
|
|
||||||
}
|
|
||||||
d := time.Since(t)
|
|
||||||
d /= time.Second
|
|
||||||
d *= time.Second
|
|
||||||
return fmt.Sprintf("%s ago", d)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) WriteStatus(w io.Writer) {
|
|
||||||
fmt.Fprintf(w, "Listening on %s\n", s.Addr())
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
fmt.Fprintf(w, "Nodes in table: %d good, %d total\n", s.numGoodNodes(), s.numNodes())
|
|
||||||
fmt.Fprintf(w, "Ongoing transactions: %d\n", len(s.transactions))
|
|
||||||
fmt.Fprintf(w, "Server node ID: %x\n", s.id.Bytes())
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
|
|
||||||
fmt.Fprintf(tw, "b#\tnode id\taddr\tanntok\tlast query\tlast response\tcf\n")
|
|
||||||
for i, b := range s.table.buckets {
|
|
||||||
b.EachNode(func(n *node) bool {
|
|
||||||
fmt.Fprintf(tw, "%d\t%x\t%s\t%v\t%s\t%s\t%d\n",
|
|
||||||
i,
|
|
||||||
n.id.Bytes(),
|
|
||||||
n.addr,
|
|
||||||
len(n.announceToken),
|
|
||||||
prettySince(n.lastGotQuery),
|
|
||||||
prettySince(n.lastGotResponse),
|
|
||||||
n.consecutiveFailures,
|
|
||||||
)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
tw.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) numNodes() (num int) {
|
|
||||||
s.table.forNodes(func(n *node) bool {
|
|
||||||
num++
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stats returns statistics for the server.
|
|
||||||
func (s *Server) Stats() ServerStats {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
ss := s.stats
|
|
||||||
ss.GoodNodes = s.numGoodNodes()
|
|
||||||
ss.Nodes = s.numNodes()
|
|
||||||
ss.OutstandingTransactions = len(s.transactions)
|
|
||||||
return ss
|
|
||||||
}
|
|
||||||
|
|
||||||
// Addr returns the listen address for the server. Packets arriving to this address
|
|
||||||
// are processed by the server (unless aliens are involved).
|
|
||||||
func (s *Server) Addr() net.Addr {
|
|
||||||
return s.socket.LocalAddr()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServer initializes a new DHT node server.
|
|
||||||
func NewServer(c *ServerConfig) (s *Server, err error) {
|
|
||||||
if c == nil {
|
|
||||||
c = &ServerConfig{
|
|
||||||
Conn: mustListen(":0"),
|
|
||||||
NoSecurity: true,
|
|
||||||
StartingNodes: GlobalBootstrapAddrs,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if missinggo.IsZeroValue(c.NodeId) {
|
|
||||||
c.NodeId = RandomNodeID()
|
|
||||||
if !c.NoSecurity && c.PublicIP != nil {
|
|
||||||
SecureNodeId(&c.NodeId, c.PublicIP)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s = &Server{
|
|
||||||
config: *c,
|
|
||||||
ipBlockList: c.IPBlocklist,
|
|
||||||
tokenServer: tokenServer{
|
|
||||||
maxIntervalDelta: 2,
|
|
||||||
interval: 5 * time.Minute,
|
|
||||||
secret: make([]byte, 20),
|
|
||||||
},
|
|
||||||
transactions: make(map[transactionKey]*Transaction),
|
|
||||||
table: table{
|
|
||||||
k: 8,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
rand.Read(s.tokenServer.secret)
|
|
||||||
s.socket = c.Conn
|
|
||||||
s.id = int160FromByteArray(c.NodeId)
|
|
||||||
s.table.rootID = s.id
|
|
||||||
go func() {
|
|
||||||
err := s.serve()
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
if s.closed.IsSet() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a description of the Server. Python repr-style.
|
|
||||||
func (s *Server) String() string {
|
|
||||||
return fmt.Sprintf("dht server on %s", s.socket.LocalAddr())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Packets to and from any address matching a range in the list are dropped.
|
|
||||||
func (s *Server) SetIPBlockList(list iplist.Ranger) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.ipBlockList = list
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) IPBlocklist() iplist.Ranger {
|
|
||||||
return s.ipBlockList
|
|
||||||
}
|
|
||||||
|
|
||||||
// processPacket decodes and dispatches one received KRPC packet. b aliases
// serve's reusable receive buffer, so it must not be retained. Takes the
// server lock for table/transaction access.
func (s *Server) processPacket(b []byte, addr Addr) {
	if len(b) < 2 || b[0] != 'd' || b[len(b)-1] != 'e' {
		// KRPC messages are bencoded dicts.
		readNotKRPCDict.Add(1)
		return
	}
	var d krpc.Msg
	err := bencode.Unmarshal(b, &d)
	if err != nil {
		readUnmarshalError.Add(1)
		// Classify (but otherwise ignore) some known-garbled cases.
		func() {
			if se, ok := err.(*bencode.SyntaxError); ok {
				// The message was truncated.
				if int(se.Offset) == len(b) {
					return
				}
				// Some messages seem to drop to nul chars abrubtly.
				if int(se.Offset) < len(b) && b[se.Offset] == 0 {
					return
				}
				// The message isn't bencode from the first.
				if se.Offset == 0 {
					return
				}
			}
			// if missinggo.CryHeard() {
			// 	log.Printf("%s: received bad krpc message from %s: %s: %+q", s, addr, err, b)
			// }
		}()
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed.IsSet() {
		return
	}
	// Track the sender in the routing table. Read-only nodes (BEP 43) are
	// flagged but not added.
	var n *node
	if sid := d.SenderID(); sid != nil {
		n, _ = s.getNode(addr, int160FromByteArray(*sid), !d.ReadOnly)
		if n != nil && d.ReadOnly {
			n.readOnly = true
		}
	}
	if d.Y == "q" {
		readQuery.Add(1)
		s.handleQuery(addr, d)
		return
	}
	// Otherwise it should be a response to one of our outstanding queries.
	t := s.findResponseTransaction(d.T, addr)
	if t == nil {
		return
	}
	go t.handleResponse(d)
	// A response proves the node is alive; reset its failure streak.
	if n != nil {
		n.lastGotResponse = time.Now()
		n.consecutiveFailures = 0
	}
	s.deleteTransaction(t)
}
|
|
||||||
|
|
||||||
// serve reads packets from the socket in a loop until a read error occurs
// (e.g. the socket is closed by Close), feeding each packet to processPacket.
func (s *Server) serve() error {
	var b [0x10000]byte
	for {
		n, addr, err := s.socket.ReadFrom(b[:])
		if err != nil {
			return err
		}
		read.Add(1)
		// A read that exactly fills the buffer was probably truncated.
		if n == len(b) {
			logonce.Stderr.Printf("received dht packet exceeds buffer size")
			continue
		}
		// Source port 0 cannot be replied to; drop it.
		if missinggo.AddrPort(addr) == 0 {
			readZeroPort.Add(1)
			continue
		}
		s.mu.Lock()
		blocked := s.ipBlocked(missinggo.AddrIP(addr))
		s.mu.Unlock()
		if blocked {
			readBlocked.Add(1)
			continue
		}
		s.processPacket(b[:n], NewAddr(addr.(*net.UDPAddr)))
	}
}
|
|
||||||
|
|
||||||
func (s *Server) ipBlocked(ip net.IP) (blocked bool) {
|
|
||||||
if s.ipBlockList == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, blocked = s.ipBlockList.Lookup(ip)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds directly to the node table.
|
|
||||||
func (s *Server) AddNode(ni krpc.NodeInfo) error {
|
|
||||||
id := int160FromByteArray(ni.ID)
|
|
||||||
if id.IsZero() {
|
|
||||||
return s.Ping(ni.Addr.UDP(), nil)
|
|
||||||
}
|
|
||||||
_, err := s.getNode(NewAddr(ni.Addr.UDP()), int160FromByteArray(ni.ID), true)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func wantsContain(ws []krpc.Want, w krpc.Want) bool {
|
|
||||||
for _, _w := range ws {
|
|
||||||
if _w == w {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldReturnNodes(queryWants []krpc.Want, querySource net.IP) bool {
|
|
||||||
if len(queryWants) != 0 {
|
|
||||||
return wantsContain(queryWants, krpc.WantNodes)
|
|
||||||
}
|
|
||||||
return querySource.To4() != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldReturnNodes6(queryWants []krpc.Want, querySource net.IP) bool {
|
|
||||||
if len(queryWants) != 0 {
|
|
||||||
return wantsContain(queryWants, krpc.WantNodes6)
|
|
||||||
}
|
|
||||||
return querySource.To4() == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeReturnNodes picks up to 8 good nodes closest to target whose addresses
// pass filter.
func (s *Server) makeReturnNodes(target int160, filter func(krpc.NodeAddr) bool) []krpc.NodeInfo {
	return s.closestGoodNodeInfos(8, target, filter)
}

// setReturnNodes fills in the nodes/nodes6 fields of a get_peers or
// find_node reply, according to the querier's wants and address family.
func (s *Server) setReturnNodes(r *krpc.Return, queryMsg krpc.Msg, querySource Addr) {
	target := int160FromByteArray(queryMsg.A.InfoHash)
	if shouldReturnNodes(queryMsg.A.Want, querySource.UDPAddr().IP) {
		// Only IPv4 addresses fit the compact v4 node encoding.
		r.Nodes = s.makeReturnNodes(target, func(na krpc.NodeAddr) bool { return na.IP.To4() != nil })
	}
	if shouldReturnNodes6(queryMsg.A.Want, querySource.UDPAddr().IP) {
		// Any address can be represented in the v6 encoding, so no filter.
		r.Nodes6 = s.makeReturnNodes(target, func(krpc.NodeAddr) bool { return true })
	}
}
|
|
||||||
|
|
||||||
// TODO: Probably should write error messages back to senders if something is
// wrong.
//
// handleQuery dispatches one incoming KRPC query. Caller holds s.mu.
func (s *Server) handleQuery(source Addr, m krpc.Msg) {
	// Refresh the sender's liveness in the routing table.
	if m.SenderID() != nil {
		if n, _ := s.getNode(source, int160FromByteArray(*m.SenderID()), !m.ReadOnly); n != nil {
			n.lastGotQuery = time.Now()
		}
	}
	// Let the application observe/intercept the query first.
	if s.config.OnQuery != nil {
		propagate := s.config.OnQuery(&m, source.UDPAddr())
		if !propagate {
			return
		}
	}
	// Don't respond.
	if s.config.Passive {
		return
	}
	// TODO: Should we disallow replying to ourself?
	args := m.A
	switch m.Q {
	case "ping":
		s.reply(source, m.T, krpc.Return{})
	case "get_peers":
		var r krpc.Return
		// TODO: Return nodes.
		s.setReturnNodes(&r, m, source)
		// Issue a token the peer must echo back in announce_peer.
		r.Token = s.createToken(source)
		s.reply(source, m.T, r)
	case "find_node":
		var r krpc.Return
		s.setReturnNodes(&r, m, source)
		s.reply(source, m.T, r)
	case "announce_peer":
		readAnnouncePeer.Add(1)
		// The token proves the announcer recently reached us from this
		// address via get_peers.
		if !s.validToken(args.Token, source) {
			expvars.Add("received announce_peer with invalid token", 1)
			return
		}
		expvars.Add("received announce_peer with valid token", 1)
		if h := s.config.OnAnnouncePeer; h != nil {
			p := Peer{
				IP:   source.UDPAddr().IP,
				Port: args.Port,
			}
			// implied_port: use the UDP source port instead of args.Port.
			if args.ImpliedPort {
				p.Port = source.UDPAddr().Port
			}
			go h(metainfo.Hash(args.InfoHash), p)
		}
		s.reply(source, m.T, krpc.Return{})
	default:
		s.sendError(source, m.T, krpc.ErrorMethodUnknown)
	}
}
|
|
||||||
|
|
||||||
func (s *Server) sendError(addr Addr, t string, e krpc.Error) {
|
|
||||||
m := krpc.Msg{
|
|
||||||
T: t,
|
|
||||||
Y: "e",
|
|
||||||
E: &e,
|
|
||||||
}
|
|
||||||
b, err := bencode.Marshal(m)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
err = s.writeToNode(b, addr)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error replying to %s: %s", addr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) reply(addr Addr, t string, r krpc.Return) {
|
|
||||||
expvars.Add("replied to peer", 1)
|
|
||||||
r.ID = s.id.AsByteArray()
|
|
||||||
m := krpc.Msg{
|
|
||||||
T: t,
|
|
||||||
Y: "r",
|
|
||||||
R: &r,
|
|
||||||
IP: addr.KRPC(),
|
|
||||||
}
|
|
||||||
b, err := bencode.Marshal(m)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
err = s.writeToNode(b, addr)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error replying to %s: %s", addr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the node if it's in the routing table, adding it if appropriate.
// With tryAdd false an absent node is an error. Caller holds s.mu.
func (s *Server) getNode(addr Addr, id int160, tryAdd bool) (*node, error) {
	if n := s.table.getNode(addr, id); n != nil {
		return n, nil
	}
	n := &node{nodeKey: nodeKey{
		id:   id,
		addr: addr,
	}}
	// Check that the node would be good to begin with. (It might have a bad
	// ID or banned address, or we fucked up the initial node field
	// invariant.)
	if err := s.nodeErr(n); err != nil {
		return nil, err
	}
	if !tryAdd {
		return nil, errors.New("node not present and add flag false")
	}
	// If the bucket is full, evict bad nodes to make room; EachNode's final
	// return tells us whether it is still full afterwards.
	b := s.table.bucketForID(id)
	if b.Len() >= s.table.k {
		if b.EachNode(func(n *node) bool {
			if s.nodeIsBad(n) {
				s.table.dropNode(n)
			}
			return b.Len() >= s.table.k
		}) {
			// No room.
			return nil, errors.New("no room in bucket")
		}
	}
	// At this point insertion must succeed; failure is an invariant bug.
	if err := s.table.addNode(n); err != nil {
		panic(fmt.Sprintf("expected to add node: %s", err))
	}
	return n, nil
}
|
|
||||||
|
|
||||||
func (s *Server) nodeIsBad(n *node) bool {
|
|
||||||
return s.nodeErr(n) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) nodeErr(n *node) error {
|
|
||||||
if n.id == s.id {
|
|
||||||
return errors.New("is self")
|
|
||||||
}
|
|
||||||
if n.id.IsZero() {
|
|
||||||
return errors.New("has zero id")
|
|
||||||
}
|
|
||||||
if !s.config.NoSecurity && !n.IsSecure() {
|
|
||||||
return errors.New("not secure")
|
|
||||||
}
|
|
||||||
if n.IsGood() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if n.consecutiveFailures >= 3 {
|
|
||||||
return fmt.Errorf("has %d consecutive failures", n.consecutiveFailures)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) writeToNode(b []byte, node Addr) (err error) {
|
|
||||||
if list := s.ipBlockList; list != nil {
|
|
||||||
if r, ok := list.Lookup(missinggo.AddrIP(node.UDPAddr())); ok {
|
|
||||||
err = fmt.Errorf("write to %s blocked: %s", node, r.Description)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// log.Printf("writing to %s: %q", node.UDPAddr(), b)
|
|
||||||
n, err := s.socket.WriteTo(b, node.UDPAddr())
|
|
||||||
writes.Add(1)
|
|
||||||
if err != nil {
|
|
||||||
writeErrors.Add(1)
|
|
||||||
err = fmt.Errorf("error writing %d bytes to %s: %s", len(b), node, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if n != len(b) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) findResponseTransaction(transactionID string, sourceNode Addr) *Transaction {
|
|
||||||
return s.transactions[transactionKey{
|
|
||||||
sourceNode.String(),
|
|
||||||
transactionID}]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) nextTransactionID() string {
|
|
||||||
var b [binary.MaxVarintLen64]byte
|
|
||||||
n := binary.PutUvarint(b[:], s.nextT)
|
|
||||||
s.nextT++
|
|
||||||
return string(b[:n])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) deleteTransaction(t *Transaction) {
|
|
||||||
delete(s.transactions, t.key())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) deleteTransactionUnlocked(t *Transaction) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.deleteTransaction(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) addTransaction(t *Transaction) {
|
|
||||||
if _, ok := s.transactions[t.key()]; ok {
|
|
||||||
panic("transaction not unique")
|
|
||||||
}
|
|
||||||
s.transactions[t.key()] = t
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the 20-byte server ID. This is the ID used to communicate with the
|
|
||||||
// DHT network.
|
|
||||||
func (s *Server) ID() [20]byte {
|
|
||||||
return s.id.AsByteArray()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) createToken(addr Addr) string {
|
|
||||||
return s.tokenServer.CreateToken(addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) validToken(token string, addr Addr) bool {
|
|
||||||
return s.tokenServer.ValidToken(token, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query sends KRPC query q with args a to addr and registers a Transaction
// that resends on timeout. callback is invoked (on its own goroutine) with
// the response, or with an error on timeout/send failure. Caller holds s.mu.
func (s *Server) query(addr Addr, q string, a *krpc.MsgArgs, callback func(krpc.Msg, error)) error {
	tid := s.nextTransactionID()
	if a == nil {
		a = &krpc.MsgArgs{}
	}
	if callback == nil {
		callback = func(krpc.Msg, error) {}
	}
	a.ID = s.ID()
	m := krpc.Msg{
		T: tid,
		Y: "q",
		Q: q,
		A: a,
	}
	// BEP 43. Outgoing queries from passive nodes should contain "ro":1 in
	// the top level dictionary.
	if s.config.Passive {
		m.ReadOnly = true
	}
	b, err := bencode.Marshal(m)
	if err != nil {
		return err
	}
	// t is declared before assignment so the closures below can capture it.
	var t *Transaction
	t = &Transaction{
		remoteAddr: addr,
		t:          tid,
		querySender: func() error {
			return s.writeToNode(b, addr)
		},
		onResponse: func(m krpc.Msg) {
			go callback(m, nil)
			go s.deleteTransactionUnlocked(t)
		},
		onTimeout: func() {
			go callback(krpc.Msg{}, errors.New("query timed out"))
			s.mu.Lock()
			defer s.mu.Unlock()
			s.deleteTransaction(t)
			// Penalize every node known at this address.
			for _, n := range s.table.addrNodes(addr) {
				n.consecutiveFailures++
			}
		},
		onSendError: func(err error) {
			go callback(krpc.Msg{}, fmt.Errorf("error resending query: %s", err))
			s.mu.Lock()
			defer s.mu.Unlock()
			s.deleteTransaction(t)
			for _, n := range s.table.addrNodes(addr) {
				n.consecutiveFailures++
			}
		},
		queryResendDelay: func() time.Duration {
			if s.config.QueryResendDelay != nil {
				return s.config.QueryResendDelay()
			}
			return defaultQueryResendDelay()
		},
	}
	s.stats.OutboundQueriesAttempted++
	err = t.sendQuery()
	if err != nil {
		return err
	}
	// s.getNode(node, "").lastSentQuery = time.Now()
	t.mu.Lock()
	t.startResendTimer()
	t.mu.Unlock()
	s.addTransaction(t)
	return nil
}
|
|
||||||
|
|
||||||
// Sends a ping query to the address given.
|
|
||||||
func (s *Server) Ping(node *net.UDPAddr, callback func(krpc.Msg, error)) error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
return s.ping(node, callback)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) ping(node *net.UDPAddr, callback func(krpc.Msg, error)) error {
|
|
||||||
return s.query(NewAddr(node), "ping", nil, callback)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) announcePeer(node Addr, infoHash int160, port int, token string, impliedPort bool, callback func(krpc.Msg, error)) error {
|
|
||||||
if port == 0 && !impliedPort {
|
|
||||||
return errors.New("nothing to announce")
|
|
||||||
}
|
|
||||||
return s.query(node, "announce_peer", &krpc.MsgArgs{
|
|
||||||
ImpliedPort: impliedPort,
|
|
||||||
InfoHash: infoHash.AsByteArray(),
|
|
||||||
Port: port,
|
|
||||||
Token: token,
|
|
||||||
}, func(m krpc.Msg, err error) {
|
|
||||||
if callback != nil {
|
|
||||||
go callback(m, err)
|
|
||||||
}
|
|
||||||
if err := m.Error(); err != nil {
|
|
||||||
announceErrors.Add(1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.stats.SuccessfulOutboundAnnouncePeerQueries++
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add response nodes to node table. Caller holds s.mu.
func (s *Server) addResponseNodes(d krpc.Msg) {
	if d.R == nil {
		return
	}
	for _, cni := range d.R.Nodes {
		// Best-effort: nodes that can't be added are skipped silently.
		s.getNode(NewAddr(cni.Addr.UDP()), int160FromByteArray(cni.ID), true)
	}
}

// Sends a find_node query to addr. targetID is the node we're looking for.
func (s *Server) findNode(addr Addr, targetID int160, callback func(krpc.Msg, error)) (err error) {
	return s.query(addr, "find_node", &krpc.MsgArgs{
		Target: targetID.AsByteArray(),
		Want:   []krpc.Want{krpc.WantNodes, krpc.WantNodes6},
	}, func(m krpc.Msg, err error) {
		// Scrape peers from the response to put in the server's table before
		// handing the response back to the caller.
		s.mu.Lock()
		s.addResponseNodes(m)
		s.mu.Unlock()
		callback(m, err)
	})
}
|
|
||||||
|
|
||||||
// TraversalStats aggregates counters for one network traversal.
type TraversalStats struct {
	NumAddrsTried int // distinct addresses queried
	NumResponses  int // how many of those queries were answered
}

// Populates the node table.
//
// Bootstrap runs a recursive find_node for our own ID, starting from the
// traversal starting addresses, and blocks until all queries complete.
func (s *Server) Bootstrap() (ts TraversalStats, err error) {
	initialAddrs, err := s.traversalStartingAddrs()
	if err != nil {
		return
	}
	var outstanding sync.WaitGroup
	// A bloom filter keeps the traversal from revisiting addresses.
	triedAddrs := newBloomFilterForTraversal()
	var onAddr func(addr Addr)
	// onAddr queries one address; each response feeds newly learned
	// addresses back into onAddr. Invoked with s.mu held.
	onAddr = func(addr Addr) {
		if triedAddrs.Test([]byte(addr.String())) {
			return
		}
		ts.NumAddrsTried++
		outstanding.Add(1)
		triedAddrs.AddString(addr.String())
		s.findNode(addr, s.id, func(m krpc.Msg, err error) {
			defer outstanding.Done()
			s.mu.Lock()
			defer s.mu.Unlock()
			if err != nil {
				return
			}
			ts.NumResponses++
			if r := m.R; r != nil {
				for _, addr := range r.Nodes {
					onAddr(NewAddr(addr.Addr.UDP()))
				}
			}
		})
	}
	s.mu.Lock()
	for _, addr := range initialAddrs {
		onAddr(addr)
	}
	s.mu.Unlock()
	outstanding.Wait()
	return
}
|
|
||||||
|
|
||||||
// Returns how many nodes are in the node table.
func (s *Server) NumNodes() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.numNodes()
}

// Exports the current node table.
func (s *Server) Nodes() (nis []krpc.NodeInfo) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.table.forNodes(func(n *node) bool {
		nis = append(nis, krpc.NodeInfo{
			Addr: n.addr.KRPC(),
			ID:   n.id.AsByteArray(),
		})
		return true
	})
	return
}

// Stops the server network activity. This is all that's required to clean-up a Server.
// The closed flag is set before the socket is closed, so the serve
// goroutine's resulting read error is treated as shutdown, not a panic.
func (s *Server) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.closed.Set()
	s.socket.Close()
}
|
|
||||||
|
|
||||||
func (s *Server) getPeers(addr Addr, infoHash int160, callback func(krpc.Msg, error)) (err error) {
|
|
||||||
return s.query(addr, "get_peers", &krpc.MsgArgs{
|
|
||||||
InfoHash: infoHash.AsByteArray(),
|
|
||||||
Want: []krpc.Want{krpc.WantNodes, krpc.WantNodes6},
|
|
||||||
}, func(m krpc.Msg, err error) {
|
|
||||||
go callback(m, err)
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.addResponseNodes(m)
|
|
||||||
if m.R != nil && m.R.Token != "" && m.SenderID() != nil {
|
|
||||||
if n, _ := s.getNode(addr, int160FromByteArray(*m.SenderID()), false); n != nil {
|
|
||||||
n.announceToken = m.R.Token
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) closestGoodNodeInfos(
|
|
||||||
k int,
|
|
||||||
targetID int160,
|
|
||||||
filter func(krpc.NodeAddr) bool,
|
|
||||||
) (
|
|
||||||
ret []krpc.NodeInfo,
|
|
||||||
) {
|
|
||||||
for _, n := range s.closestNodes(k, targetID, func(n *node) bool {
|
|
||||||
return n.IsGood() && filter(n.NodeInfo().Addr)
|
|
||||||
}) {
|
|
||||||
ret = append(ret, n.NodeInfo())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) closestNodes(k int, target int160, filter func(*node) bool) []*node {
|
|
||||||
return s.table.closestNodes(k, target, filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) traversalStartingAddrs() (addrs []Addr, err error) {
|
|
||||||
s.mu.RLock()
|
|
||||||
s.table.forNodes(func(n *node) bool {
|
|
||||||
addrs = append(addrs, n.addr)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
s.mu.RUnlock()
|
|
||||||
if len(addrs) > 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if s.config.StartingNodes != nil {
|
|
||||||
addrs, err = s.config.StartingNodes()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(addrs) == 0 {
|
|
||||||
err = errors.New("no initial nodes")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) AddNodesFromFile(fileName string) (added int, err error) {
|
|
||||||
ns, err := ReadNodesFromFile(fileName)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, n := range ns {
|
|
||||||
if s.AddNode(n) == nil {
|
|
||||||
added++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
117
vendor/github.com/anacrolix/dht/table.go
generated
vendored
117
vendor/github.com/anacrolix/dht/table.go
generated
vendored
@ -1,117 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
// Node table, with indexes on distance from root ID to bucket, and node addr.
type table struct {
	rootID int160 // our own node ID; distances are measured from it
	k      int    // bucket capacity (the Kademlia "k")
	// buckets[i] holds nodes whose XOR distance from rootID has bit
	// length 160-i (see bucketIndex).
	buckets [160]bucket
	// addrs indexes node IDs by address string; several IDs can share an
	// address.
	addrs map[string]map[int160]struct{}
}
|
|
||||||
|
|
||||||
// addrNodes returns every node known at addr.
func (tbl *table) addrNodes(addr Addr) []*node {
	a := tbl.addrs[addr.String()]
	ret := make([]*node, 0, len(a))
	for id := range a {
		ret = append(ret, tbl.getNode(addr, id))
	}
	return ret
}

// dropNode removes n from both the addr index and its bucket, panicking if
// either index has lost track of it (an invariant violation).
func (tbl *table) dropNode(n *node) {
	as := n.addr.String()
	if _, ok := tbl.addrs[as][n.id]; !ok {
		panic("missing id for addr")
	}
	delete(tbl.addrs[as], n.id)
	// Drop the per-addr set entirely once it is empty.
	if len(tbl.addrs[as]) == 0 {
		delete(tbl.addrs, as)
	}
	b := tbl.bucketForID(n.id)
	if _, ok := b.nodes[n]; !ok {
		panic("expected node in bucket")
	}
	delete(b.nodes, n)
}
|
|
||||||
|
|
||||||
func (tbl *table) bucketForID(id int160) *bucket {
|
|
||||||
return &tbl.buckets[tbl.bucketIndex(id)]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) numNodes() (num int) {
|
|
||||||
for _, b := range tbl.buckets {
|
|
||||||
num += b.Len()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) bucketIndex(id int160) int {
|
|
||||||
if id == tbl.rootID {
|
|
||||||
panic("nobody puts the root ID in a bucket")
|
|
||||||
}
|
|
||||||
var a int160
|
|
||||||
a.Xor(&tbl.rootID, &id)
|
|
||||||
index := 160 - a.BitLen()
|
|
||||||
return index
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) forNodes(f func(*node) bool) bool {
|
|
||||||
for _, b := range tbl.buckets {
|
|
||||||
if !b.EachNode(f) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) getNode(addr Addr, id int160) *node {
|
|
||||||
if id == tbl.rootID {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return tbl.buckets[tbl.bucketIndex(id)].GetNode(addr, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) closestNodes(k int, target int160, filter func(*node) bool) (ret []*node) {
|
|
||||||
for bi := func() int {
|
|
||||||
if target == tbl.rootID {
|
|
||||||
return len(tbl.buckets) - 1
|
|
||||||
} else {
|
|
||||||
return tbl.bucketIndex(target)
|
|
||||||
}
|
|
||||||
}(); bi >= 0 && len(ret) < k; bi-- {
|
|
||||||
for n := range tbl.buckets[bi].nodes {
|
|
||||||
if filter(n) {
|
|
||||||
ret = append(ret, n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO: Keep only the closest.
|
|
||||||
if len(ret) > k {
|
|
||||||
ret = ret[:k]
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbl *table) addNode(n *node) error {
|
|
||||||
if n.id == tbl.rootID {
|
|
||||||
return errors.New("is root id")
|
|
||||||
}
|
|
||||||
b := &tbl.buckets[tbl.bucketIndex(n.id)]
|
|
||||||
if b.GetNode(n.addr, n.id) != nil {
|
|
||||||
return errors.New("already present")
|
|
||||||
}
|
|
||||||
if b.Len() >= tbl.k {
|
|
||||||
return errors.New("bucket is full")
|
|
||||||
}
|
|
||||||
b.AddNode(n, tbl.k)
|
|
||||||
if tbl.addrs == nil {
|
|
||||||
tbl.addrs = make(map[string]map[int160]struct{}, 160*tbl.k)
|
|
||||||
}
|
|
||||||
as := n.addr.String()
|
|
||||||
if tbl.addrs[as] == nil {
|
|
||||||
tbl.addrs[as] = make(map[int160]struct{}, 1)
|
|
||||||
}
|
|
||||||
tbl.addrs[as][n.id] = struct{}{}
|
|
||||||
return nil
|
|
||||||
}
|
|
57
vendor/github.com/anacrolix/dht/tokens.go
generated
vendored
57
vendor/github.com/anacrolix/dht/tokens.go
generated
vendored
@ -1,57 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha1"
|
|
||||||
"encoding/binary"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/bradfitz/iter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Manages creation and validation of tokens issued to querying nodes.
type tokenServer struct {
	// Something only we know that peers can't guess, so they can't deduce valid tokens.
	secret []byte
	// How long between token changes.
	interval time.Duration
	// How many intervals may pass between the current interval, and one used to generate a token before it is invalid.
	maxIntervalDelta int
	// Clock override for tests; nil means time.Now (see getTimeNow).
	timeNow func() time.Time
}
|
|
||||||
|
|
||||||
// CreateToken issues a token for addr bound to the current time interval.
func (me tokenServer) CreateToken(addr Addr) string {
	return me.createToken(addr, me.getTimeNow())
}

// createToken derives a token as SHA-1(16-byte IP || interval counter || secret).
func (me tokenServer) createToken(addr Addr, t time.Time) string {
	h := sha1.New()
	ip := addr.UDPAddr().IP.To16()
	if len(ip) != 16 {
		panic(ip)
	}
	h.Write(ip)
	// Quantize time so tokens stay stable within an interval.
	ti := t.UnixNano() / int64(me.interval)
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], uint64(ti))
	h.Write(b[:])
	h.Write(me.secret)
	return string(h.Sum(nil))
}

// ValidToken accepts a token that matches the current interval or any of the
// previous maxIntervalDelta intervals.
func (me *tokenServer) ValidToken(token string, addr Addr) bool {
	t := me.getTimeNow()
	for range iter.N(me.maxIntervalDelta + 1) {
		if me.createToken(addr, t) == token {
			return true
		}
		t = t.Add(-me.interval)
	}
	return false
}

// getTimeNow returns the injected clock, defaulting to time.Now.
func (me *tokenServer) getTimeNow() time.Time {
	if me.timeNow == nil {
		return time.Now()
	}
	return me.timeNow()
}
|
|
72
vendor/github.com/anacrolix/dht/transaction.go
generated
vendored
72
vendor/github.com/anacrolix/dht/transaction.go
generated
vendored
@ -1,72 +0,0 @@
|
|||||||
package dht
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/anacrolix/dht/krpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Transaction keeps track of a message exchange between nodes, such as a
// query message and a response message.
type Transaction struct {
	remoteAddr       Addr                 // peer the query was sent to
	t                string               // KRPC transaction ID
	onResponse       func(krpc.Msg)       // invoked when a response arrives
	onTimeout        func()               // invoked once retries are exhausted
	onSendError      func(error)          // invoked when a resend fails
	querySender      func() error         // sends (or resends) the query bytes
	queryResendDelay func() time.Duration // delay between resend attempts

	mu          sync.Mutex // guards the fields below
	gotResponse bool
	timer       *time.Timer
	retries     int
	lastSend    time.Time
}
|
|
||||||
|
|
||||||
// handleResponse marks the transaction answered (stopping further resends)
// and forwards m to the response callback.
func (t *Transaction) handleResponse(m krpc.Msg) {
	t.mu.Lock()
	t.gotResponse = true
	t.mu.Unlock()
	t.onResponse(m)
}

// key identifies the transaction by remote address and transaction ID.
func (t *Transaction) key() transactionKey {
	return transactionKey{
		t.remoteAddr.String(),
		t.t,
	}
}

// startResendTimer arms the resend timer. Caller holds t.mu.
func (t *Transaction) startResendTimer() {
	t.timer = time.AfterFunc(t.queryResendDelay(), t.resendCallback)
}

// resendCallback fires from the timer: it resends the query up to two
// retries, then reports a timeout.
func (t *Transaction) resendCallback() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.gotResponse {
		return
	}
	if t.retries == 2 {
		go t.onTimeout()
		return
	}
	t.retries++
	if err := t.sendQuery(); err != nil {
		go t.onSendError(err)
		return
	}
	// We are inside the timer's own callback, so Reset must find the timer
	// expired; a true return would mean it was still active.
	if t.timer.Reset(t.queryResendDelay()) {
		panic("timer should have fired to get here")
	}
}

// sendQuery transmits the query and records when it was sent.
func (t *Transaction) sendQuery() error {
	if err := t.querySender(); err != nil {
		return err
	}
	t.lastSend = time.Now()
	return nil
}
|
|
19
vendor/github.com/anacrolix/go-libutp/LICENSE
generated
vendored
19
vendor/github.com/anacrolix/go-libutp/LICENSE
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
Copyright (c) 2010-2013 BitTorrent, Inc.
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
8
vendor/github.com/anacrolix/go-libutp/README.md
generated
vendored
8
vendor/github.com/anacrolix/go-libutp/README.md
generated
vendored
@ -1,8 +0,0 @@
|
|||||||
# go-libutp
|
|
||||||
|
|
||||||
[![GoDoc](https://godoc.org/github.com/anacrolix/go-libutp?status.svg)](http://godoc.org/github.com/anacrolix/go-libutp)
|
|
||||||
[![CircleCI](https://circleci.com/gh/anacrolix/go-libutp.svg?style=shield)](https://circleci.com/gh/anacrolix/go-libutp)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/anacrolix/go-libutp)](https://goreportcard.com/report/github.com/anacrolix/go-libutp)
|
|
||||||
[![Appveyor Status](https://ci.appveyor.com/api/projects/status/github/anacrolix/go-libutp?branch=master&svg=true)](https://ci.appveyor.com/project/anacrolix/go-libutp)
|
|
||||||
|
|
||||||
This is a Go wrapper for [libutp](https://github.com/bittorrent/libutp).
|
|
21
vendor/github.com/anacrolix/go-libutp/appveyor.yml
generated
vendored
21
vendor/github.com/anacrolix/go-libutp/appveyor.yml
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
image:
|
|
||||||
- Visual Studio 2017
|
|
||||||
|
|
||||||
environment:
|
|
||||||
GOPATH: c:\gopath
|
|
||||||
|
|
||||||
install:
|
|
||||||
- set PATH=%GOPATH%\bin;%PATH%
|
|
||||||
- set PATH=C:\msys64\mingw64\bin;%PATH%
|
|
||||||
- go get github.com/anacrolix/envpprof
|
|
||||||
- go get github.com/anacrolix/tagflag
|
|
||||||
- go get github.com/stretchr/testify/assert
|
|
||||||
- go get github.com/anacrolix/mmsg
|
|
||||||
- go get golang.org/x/net/nettest
|
|
||||||
- go get github.com/anacrolix/sync
|
|
||||||
|
|
||||||
build_script:
|
|
||||||
- go build -v -x -a
|
|
||||||
|
|
||||||
before_test:
|
|
||||||
- go test -v
|
|
173
vendor/github.com/anacrolix/go-libutp/callbacks.go
generated
vendored
173
vendor/github.com/anacrolix/go-libutp/callbacks.go
generated
vendored
@ -1,173 +0,0 @@
|
|||||||
package utp
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include "utp.h"
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (a *C.utp_callback_arguments) bufBytes() []byte {
|
|
||||||
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
|
|
||||||
uintptr(unsafe.Pointer(a.buf)),
|
|
||||||
int(a.len),
|
|
||||||
int(a.len),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *C.utp_callback_arguments) state() C.int {
|
|
||||||
return *(*C.int)(unsafe.Pointer(&a.anon0))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *C.utp_callback_arguments) error_code() C.int {
|
|
||||||
return *(*C.int)(unsafe.Pointer(&a.anon0))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *C.utp_callback_arguments) address() *C.struct_sockaddr {
|
|
||||||
return *(**C.struct_sockaddr)(unsafe.Pointer(&a.anon0[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *C.utp_callback_arguments) addressLen() C.socklen_t {
|
|
||||||
return *(*C.socklen_t)(unsafe.Pointer(&a.anon1[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
var sends int64
|
|
||||||
|
|
||||||
//export sendtoCallback
|
|
||||||
func sendtoCallback(a *C.utp_callback_arguments) (ret C.uint64) {
|
|
||||||
s := getSocketForLibContext(a.context)
|
|
||||||
b := a.bufBytes()
|
|
||||||
addr := structSockaddrToUDPAddr(a.address())
|
|
||||||
newSends := atomic.AddInt64(&sends, 1)
|
|
||||||
if logCallbacks {
|
|
||||||
Logger.Printf("sending %d bytes, %d packets", len(b), newSends)
|
|
||||||
}
|
|
||||||
expMap.Add("socket PacketConn writes", 1)
|
|
||||||
n, err := s.pc.WriteTo(b, addr)
|
|
||||||
c := s.conns[a.socket]
|
|
||||||
if err != nil {
|
|
||||||
expMap.Add("socket PacketConn write errors", 1)
|
|
||||||
if c != nil && c.userOnError != nil {
|
|
||||||
go c.userOnError(err)
|
|
||||||
} else if c != nil &&
|
|
||||||
(strings.Contains(err.Error(), "can't assign requested address") ||
|
|
||||||
strings.Contains(err.Error(), "invalid argument")) {
|
|
||||||
// Should be an bad argument or network configuration problem we
|
|
||||||
// can't recover from.
|
|
||||||
c.onError(err)
|
|
||||||
} else if c != nil && strings.Contains(err.Error(), "operation not permitted") {
|
|
||||||
// Rate-limited. Probably Linux. The implementation might try
|
|
||||||
// again later.
|
|
||||||
} else {
|
|
||||||
Logger.Printf("error sending packet: %s", err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if n != len(b) {
|
|
||||||
expMap.Add("socket PacketConn short writes", 1)
|
|
||||||
Logger.Printf("expected to send %d bytes but only sent %d", len(b), n)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//export errorCallback
|
|
||||||
func errorCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
err := errorForCode(a.error_code())
|
|
||||||
if logCallbacks {
|
|
||||||
log.Printf("error callback: socket %p: %s", a.socket, err)
|
|
||||||
}
|
|
||||||
libContextToSocket[a.context].conns[a.socket].onError(err)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
//export logCallback
|
|
||||||
func logCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
Logger.Printf("libutp: %s", C.GoString((*C.char)(unsafe.Pointer(a.buf))))
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
//export stateChangeCallback
|
|
||||||
func stateChangeCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
s := libContextToSocket[a.context]
|
|
||||||
c := s.conns[a.socket]
|
|
||||||
if logCallbacks {
|
|
||||||
Logger.Printf("state changed: conn %p: %s", c, libStateName(a.state()))
|
|
||||||
}
|
|
||||||
switch a.state() {
|
|
||||||
case C.UTP_STATE_CONNECT:
|
|
||||||
c.setConnected()
|
|
||||||
// A dialled connection will not tell the remote it's ready until it
|
|
||||||
// writes. If the dialer has no intention of writing, this will stall
|
|
||||||
// everything. We do an empty write to get things rolling again. This
|
|
||||||
// circumstance occurs when c1 in the RacyRead nettest is the dialer.
|
|
||||||
C.utp_write(a.socket, nil, 0)
|
|
||||||
case C.UTP_STATE_WRITABLE:
|
|
||||||
c.cond.Broadcast()
|
|
||||||
case C.UTP_STATE_EOF:
|
|
||||||
c.setGotEOF()
|
|
||||||
case C.UTP_STATE_DESTROYING:
|
|
||||||
c.onDestroyed()
|
|
||||||
s.onLibSocketDestroyed(a.socket)
|
|
||||||
default:
|
|
||||||
panic(a.state)
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
//export readCallback
|
|
||||||
func readCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
s := libContextToSocket[a.context]
|
|
||||||
c := s.conns[a.socket]
|
|
||||||
b := a.bufBytes()
|
|
||||||
if logCallbacks {
|
|
||||||
log.Printf("read callback: conn %p: %d bytes", c, len(b))
|
|
||||||
}
|
|
||||||
if len(b) == 0 {
|
|
||||||
panic("that will break the read drain invariant")
|
|
||||||
}
|
|
||||||
c.readBuf.Write(b)
|
|
||||||
c.cond.Broadcast()
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
//export acceptCallback
|
|
||||||
func acceptCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
if logCallbacks {
|
|
||||||
log.Printf("accept callback: %#v", *a)
|
|
||||||
}
|
|
||||||
s := getSocketForLibContext(a.context)
|
|
||||||
c := s.newConn(a.socket)
|
|
||||||
c.setRemoteAddr()
|
|
||||||
c.inited = true
|
|
||||||
s.pushBacklog(c)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
//export getReadBufferSizeCallback
|
|
||||||
func getReadBufferSizeCallback(a *C.utp_callback_arguments) (ret C.uint64) {
|
|
||||||
s := libContextToSocket[a.context]
|
|
||||||
c := s.conns[a.socket]
|
|
||||||
if c == nil {
|
|
||||||
// socket hasn't been added to the Socket.conns yet. The read buffer
|
|
||||||
// starts out empty, and the default implementation for this callback
|
|
||||||
// returns 0, so we'll return that.
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
ret = C.uint64(c.readBuf.Len())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//export firewallCallback
|
|
||||||
func firewallCallback(a *C.utp_callback_arguments) C.uint64 {
|
|
||||||
s := getSocketForLibContext(a.context)
|
|
||||||
if s.block {
|
|
||||||
return 1
|
|
||||||
} else {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
327
vendor/github.com/anacrolix/go-libutp/conn.go
generated
vendored
327
vendor/github.com/anacrolix/go-libutp/conn.go
generated
vendored
@ -1,327 +0,0 @@
|
|||||||
package utp
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include "utp.h"
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"runtime/pprof"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrConnClosed = errors.New("closed")
|
|
||||||
errConnDestroyed = errors.New("destroyed")
|
|
||||||
)
|
|
||||||
|
|
||||||
type Conn struct {
|
|
||||||
s *Socket
|
|
||||||
us *C.utp_socket
|
|
||||||
cond sync.Cond
|
|
||||||
readBuf bytes.Buffer
|
|
||||||
gotEOF bool
|
|
||||||
gotConnect bool
|
|
||||||
// Set on state changed to UTP_STATE_DESTROYING. Not valid to refer to the
|
|
||||||
// socket after getting this.
|
|
||||||
destroyed bool
|
|
||||||
// Conn.Close was called.
|
|
||||||
closed bool
|
|
||||||
// Corresponds to utp_socket.state != CS_UNITIALIZED. This requires the
|
|
||||||
// utp_socket was obtained from the accept callback, or has had
|
|
||||||
// utp_connect called on it. We can't call utp_close until it's true.
|
|
||||||
inited bool
|
|
||||||
|
|
||||||
err error
|
|
||||||
|
|
||||||
writeDeadline time.Time
|
|
||||||
writeDeadlineTimer *time.Timer
|
|
||||||
readDeadline time.Time
|
|
||||||
readDeadlineTimer *time.Timer
|
|
||||||
|
|
||||||
numBytesRead int64
|
|
||||||
numBytesWritten int64
|
|
||||||
|
|
||||||
localAddr net.Addr
|
|
||||||
remoteAddr net.Addr
|
|
||||||
|
|
||||||
// Called for non-fatal errors, such as packet write errors.
|
|
||||||
userOnError func(error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) onError(err error) {
|
|
||||||
c.err = err
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) setConnected() {
|
|
||||||
c.gotConnect = true
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) waitForConnect(ctx context.Context) error {
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}()
|
|
||||||
for {
|
|
||||||
if c.closed {
|
|
||||||
return ErrConnClosed
|
|
||||||
}
|
|
||||||
if c.err != nil {
|
|
||||||
return c.err
|
|
||||||
}
|
|
||||||
if c.gotConnect {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
c.cond.Wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) Close() error {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
c.close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) close() {
|
|
||||||
if c.inited && !c.destroyed && !c.closed {
|
|
||||||
C.utp_close(c.us)
|
|
||||||
}
|
|
||||||
if !c.inited {
|
|
||||||
// We'll never receive a destroy message, so we should remove it now.
|
|
||||||
delete(c.s.conns, c.us)
|
|
||||||
}
|
|
||||||
c.closed = true
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) LocalAddr() net.Addr {
|
|
||||||
return c.localAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) readNoWait(b []byte) (n int, err error) {
|
|
||||||
n, _ = c.readBuf.Read(b)
|
|
||||||
if n != 0 && c.readBuf.Len() == 0 {
|
|
||||||
// Can we call this if the utp_socket is closed, destroyed or errored?
|
|
||||||
if c.us != nil {
|
|
||||||
C.utp_read_drained(c.us)
|
|
||||||
// C.utp_issue_deferred_acks(C.utp_get_context(c.s))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.readBuf.Len() != 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = func() error {
|
|
||||||
switch {
|
|
||||||
case c.gotEOF:
|
|
||||||
return io.EOF
|
|
||||||
case c.err != nil:
|
|
||||||
return c.err
|
|
||||||
case c.destroyed:
|
|
||||||
return errConnDestroyed
|
|
||||||
case c.closed:
|
|
||||||
return errors.New("closed")
|
|
||||||
case !c.readDeadline.IsZero() && !time.Now().Before(c.readDeadline):
|
|
||||||
return errDeadlineExceeded{}
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) Read(b []byte) (int, error) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
for {
|
|
||||||
n, err := c.readNoWait(b)
|
|
||||||
c.numBytesRead += int64(n)
|
|
||||||
// log.Printf("read %d bytes", c.numBytesRead)
|
|
||||||
if n != 0 || len(b) == 0 || err != nil {
|
|
||||||
// log.Printf("conn %p: read %d bytes: %s", c, n, err)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
c.cond.Wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) writeNoWait(b []byte) (n int, err error) {
|
|
||||||
err = func() error {
|
|
||||||
switch {
|
|
||||||
case c.err != nil:
|
|
||||||
return c.err
|
|
||||||
case c.closed:
|
|
||||||
return ErrConnClosed
|
|
||||||
case c.destroyed:
|
|
||||||
return errConnDestroyed
|
|
||||||
case !c.writeDeadline.IsZero() && !time.Now().Before(c.writeDeadline):
|
|
||||||
return errDeadlineExceeded{}
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pprof.Do(context.Background(), pprof.Labels("cgo", "utp_write"), func(context.Context) {
|
|
||||||
n = int(C.utp_write(c.us, unsafe.Pointer(&b[0]), C.size_t(len(b))))
|
|
||||||
})
|
|
||||||
if n < 0 {
|
|
||||||
panic(n)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) Write(b []byte) (n int, err error) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
for len(b) != 0 {
|
|
||||||
var n1 int
|
|
||||||
n1, err = c.writeNoWait(b)
|
|
||||||
b = b[n1:]
|
|
||||||
n += n1
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if n1 != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
c.cond.Wait()
|
|
||||||
}
|
|
||||||
c.numBytesWritten += int64(n)
|
|
||||||
// log.Printf("wrote %d bytes", c.numBytesWritten)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) setRemoteAddr() {
|
|
||||||
var rsa syscall.RawSockaddrAny
|
|
||||||
var addrlen C.socklen_t = C.socklen_t(unsafe.Sizeof(rsa))
|
|
||||||
C.utp_getpeername(c.us, (*C.struct_sockaddr)(unsafe.Pointer(&rsa)), &addrlen)
|
|
||||||
sa, err := anyToSockaddr(&rsa)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
c.remoteAddr = sockaddrToUDP(sa)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) RemoteAddr() net.Addr {
|
|
||||||
return c.remoteAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) SetDeadline(t time.Time) error {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
c.readDeadline = t
|
|
||||||
c.writeDeadline = t
|
|
||||||
if t.IsZero() {
|
|
||||||
c.readDeadlineTimer.Stop()
|
|
||||||
c.writeDeadlineTimer.Stop()
|
|
||||||
} else {
|
|
||||||
d := t.Sub(time.Now())
|
|
||||||
c.readDeadlineTimer.Reset(d)
|
|
||||||
c.writeDeadlineTimer.Reset(d)
|
|
||||||
}
|
|
||||||
c.cond.Broadcast()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (c *Conn) SetReadDeadline(t time.Time) error {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
c.readDeadline = t
|
|
||||||
if t.IsZero() {
|
|
||||||
c.readDeadlineTimer.Stop()
|
|
||||||
} else {
|
|
||||||
d := t.Sub(time.Now())
|
|
||||||
c.readDeadlineTimer.Reset(d)
|
|
||||||
}
|
|
||||||
c.cond.Broadcast()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (c *Conn) SetWriteDeadline(t time.Time) error {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
c.writeDeadline = t
|
|
||||||
if t.IsZero() {
|
|
||||||
c.writeDeadlineTimer.Stop()
|
|
||||||
} else {
|
|
||||||
d := t.Sub(time.Now())
|
|
||||||
c.writeDeadlineTimer.Reset(d)
|
|
||||||
}
|
|
||||||
c.cond.Broadcast()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) setGotEOF() {
|
|
||||||
c.gotEOF = true
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) onDestroyed() {
|
|
||||||
c.destroyed = true
|
|
||||||
c.us = nil
|
|
||||||
c.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) WriteBufferLen() int {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
return int(C.utp_getsockopt(c.us, C.UTP_SNDBUF))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) SetWriteBufferLen(len int) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
i := C.utp_setsockopt(c.us, C.UTP_SNDBUF, C.int(len))
|
|
||||||
if i != 0 {
|
|
||||||
panic(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect an unconnected Conn (obtained through Socket.NewConn).
|
|
||||||
func (c *Conn) Connect(ctx context.Context, network, addr string) error {
|
|
||||||
if network == "" {
|
|
||||||
network = c.localAddr.Network()
|
|
||||||
}
|
|
||||||
ua, err := resolveAddr(network, addr)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error resolving address: %v", err)
|
|
||||||
}
|
|
||||||
sa, sl := netAddrToLibSockaddr(ua)
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
if c.s.closed {
|
|
||||||
return errSocketClosed
|
|
||||||
}
|
|
||||||
if n := C.utp_connect(c.us, sa, sl); n != 0 {
|
|
||||||
panic(n)
|
|
||||||
}
|
|
||||||
c.inited = true
|
|
||||||
c.setRemoteAddr()
|
|
||||||
err = c.waitForConnect(ctx)
|
|
||||||
if err != nil {
|
|
||||||
c.close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) OnError(f func(error)) {
|
|
||||||
mu.Lock()
|
|
||||||
c.userOnError = f
|
|
||||||
mu.Unlock()
|
|
||||||
}
|
|
11
vendor/github.com/anacrolix/go-libutp/deadline.go
generated
vendored
11
vendor/github.com/anacrolix/go-libutp/deadline.go
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
package utp
|
|
||||||
|
|
||||||
import "net"
|
|
||||||
|
|
||||||
type errDeadlineExceeded struct{}
|
|
||||||
|
|
||||||
var _ net.Error = errDeadlineExceeded{}
|
|
||||||
|
|
||||||
func (errDeadlineExceeded) Error() string { return "deadline exceeded" }
|
|
||||||
func (errDeadlineExceeded) Temporary() bool { return false }
|
|
||||||
func (errDeadlineExceeded) Timeout() bool { return true }
|
|
14
vendor/github.com/anacrolix/go-libutp/expvars.go
generated
vendored
14
vendor/github.com/anacrolix/go-libutp/expvars.go
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
package utp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"expvar"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
expMap = expvar.NewMap("go-libutp")
|
|
||||||
socketUtpPacketsReceived = expvar.NewInt("utpSocketUtpPacketsReceived")
|
|
||||||
socketNonUtpPacketsReceived = expvar.NewInt("utpSocketNonUtpPacketsReceived")
|
|
||||||
nonUtpPacketsDropped = expvar.NewInt("utpNonUtpPacketsDropped")
|
|
||||||
multiMsgRecvs = expvar.NewInt("utpMultiMsgRecvs")
|
|
||||||
singleMsgRecvs = expvar.NewInt("utpSingleMsgRecvs")
|
|
||||||
)
|
|
16
vendor/github.com/anacrolix/go-libutp/go.mod
generated
vendored
16
vendor/github.com/anacrolix/go-libutp/go.mod
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
module github.com/anacrolix/go-libutp
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df
|
|
||||||
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2
|
|
||||||
github.com/davecgh/go-spew v1.1.0 // indirect
|
|
||||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e
|
|
||||||
github.com/huandu/xstrings v1.0.0
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/stretchr/testify v1.2.1
|
|
||||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79
|
|
||||||
)
|
|
36
vendor/github.com/anacrolix/go-libutp/go.sum
generated
vendored
36
vendor/github.com/anacrolix/go-libutp/go.sum
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa h1:xCaATLKmn39QqLs3tUZYr6eKvezJV+FYvVOLTklxK6U=
|
|
||||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff h1:c2Hrwd+LaVrsLJQ5nV8dAP4z6iM0G6aewsbrj3x515s=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df h1:+se8qhX5ivmSCkP+gZXyFx2ETjk1pmnrYJ0Iyc+hZKY=
|
|
||||||
github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
|
|
||||||
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb h1:2Or5ccMoY4Kfao+WdL2w6tpY6ZEe+2VTVbIPd7A/Ajk=
|
|
||||||
github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180725074606-fda11526ff08 h1:w0QQ3EPXGMOFLBRvBk7vCeNkfreNQ5xhTzUIDIEdNm0=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180725074606-fda11526ff08/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778 h1:XpCDEixzXOB8yaTW/4YBzKrJdMcFI0DzpPTYNv75wzk=
|
|
||||||
github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0 h1:xcd2GmlPWBsGNjdbwriHXvJJtagl1AnbjTPhJTksJDQ=
|
|
||||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2 h1:1B/+1BcRhOMG1KH/YhNIU8OppSWk5d/NGyfRla88CuY=
|
|
||||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
|
||||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
|
||||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e h1:Fw7ZmgiklsLh5EQWyHh1sumKSCG1+yjEctIpGKib87s=
|
|
||||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
|
||||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
|
||||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk=
|
|
||||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
|
||||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
|
||||||
github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U=
|
|
||||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
|
||||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79 h1:1FDlG4HI84rVePw1/0E/crL5tt2N+1blLJpY6UZ6krs=
|
|
||||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
37
vendor/github.com/anacrolix/go-libutp/libapi.go
generated
vendored
37
vendor/github.com/anacrolix/go-libutp/libapi.go
generated
vendored
@ -1,37 +0,0 @@
|
|||||||
package utp
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include "utp.h"
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"github.com/anacrolix/sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Option = C.int
|
|
||||||
|
|
||||||
const (
|
|
||||||
LogNormal Option = C.UTP_LOG_NORMAL
|
|
||||||
LogMtu Option = C.UTP_LOG_MTU
|
|
||||||
LogDebug Option = C.UTP_LOG_DEBUG
|
|
||||||
SendBuffer Option = C.UTP_SNDBUF
|
|
||||||
RecvBuffer Option = C.UTP_RCVBUF
|
|
||||||
TargetDelay Option = C.UTP_TARGET_DELAY
|
|
||||||
|
|
||||||
TimedOut = C.UTP_ETIMEDOUT
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
mu sync.Mutex
|
|
||||||
libContextToSocket = map[*C.utp_context]*Socket{}
|
|
||||||
)
|
|
||||||
|
|
||||||
func getSocketForLibContext(uc *C.utp_context) *Socket {
|
|
||||||
return libContextToSocket[uc]
|
|
||||||
}
|
|
||||||
|
|
||||||
func errorForCode(code C.int) error {
|
|
||||||
return errors.New(libErrorCodeNames(code))
|
|
||||||
}
|
|
65
vendor/github.com/anacrolix/go-libutp/libutp-2012.vcxproj.filters
generated
vendored
65
vendor/github.com/anacrolix/go-libutp/libutp-2012.vcxproj.filters
generated
vendored
@ -1,65 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
|
||||||
<ItemGroup>
|
|
||||||
<Filter Include="Source Files">
|
|
||||||
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
|
|
||||||
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
|
|
||||||
</Filter>
|
|
||||||
<Filter Include="Header Files">
|
|
||||||
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
|
|
||||||
<Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
|
|
||||||
</Filter>
|
|
||||||
</ItemGroup>
|
|
||||||
<ItemGroup>
|
|
||||||
<ClInclude Include="utp_templates.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_callbacks.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_hash.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_internal.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_packedsockaddr.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_utils.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp_types.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="utp.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
<ClInclude Include="libutp_inet_ntop.h">
|
|
||||||
<Filter>Header Files</Filter>
|
|
||||||
</ClInclude>
|
|
||||||
</ItemGroup>
|
|
||||||
<ItemGroup>
|
|
||||||
<ClCompile Include="utp_api.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="utp_callbacks.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="utp_hash.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="utp_internal.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="utp_packedsockaddr.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="utp_utils.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
<ClCompile Include="libutp_inet_ntop.cpp">
|
|
||||||
<Filter>Source Files</Filter>
|
|
||||||
</ClCompile>
|
|
||||||
</ItemGroup>
|
|
||||||
</Project>
|
|
258
vendor/github.com/anacrolix/go-libutp/libutp.vcxproj
generated
vendored
258
vendor/github.com/anacrolix/go-libutp/libutp.vcxproj
generated
vendored
@ -1,258 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
|
||||||
<ItemGroup Label="ProjectConfigurations">
|
|
||||||
<ProjectConfiguration Include="Debug|Win32">
|
|
||||||
<Configuration>Debug</Configuration>
|
|
||||||
<Platform>Win32</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="Debug|x64">
|
|
||||||
<Configuration>Debug</Configuration>
|
|
||||||
<Platform>x64</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="Release|Win32">
|
|
||||||
<Configuration>Release</Configuration>
|
|
||||||
<Platform>Win32</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="Release|x64">
|
|
||||||
<Configuration>Release</Configuration>
|
|
||||||
<Platform>x64</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="WinRTDebug|Win32">
|
|
||||||
<Configuration>WinRTDebug</Configuration>
|
|
||||||
<Platform>Win32</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="WinRTDebug|x64">
|
|
||||||
<Configuration>WinRTDebug</Configuration>
|
|
||||||
<Platform>x64</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="WinRTRelease|Win32">
|
|
||||||
<Configuration>WinRTRelease</Configuration>
|
|
||||||
<Platform>Win32</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
<ProjectConfiguration Include="WinRTRelease|x64">
|
|
||||||
<Configuration>WinRTRelease</Configuration>
|
|
||||||
<Platform>x64</Platform>
|
|
||||||
</ProjectConfiguration>
|
|
||||||
</ItemGroup>
|
|
||||||
<ItemGroup>
|
|
||||||
<ClInclude Include="utp_templates.h" />
|
|
||||||
<ClInclude Include="utp.h" />
|
|
||||||
<ClInclude Include="utp_callbacks.h" />
|
|
||||||
<ClInclude Include="utp_hash.h" />
|
|
||||||
<ClInclude Include="utp_internal.h" />
|
|
||||||
<ClInclude Include="utp_packedsockaddr.h" />
|
|
||||||
<ClInclude Include="utp_utils.h" />
|
|
||||||
<ClInclude Include="utp_types.h" />
|
|
||||||
<ClInclude Include="libutp_inet_ntop.h" />
|
|
||||||
</ItemGroup>
|
|
||||||
<ItemGroup>
|
|
||||||
<ClCompile Include="libutp_inet_ntop.cpp" />
|
|
||||||
<ClCompile Include="utp_api.cpp" />
|
|
||||||
<ClCompile Include="utp_callbacks.cpp" />
|
|
||||||
<ClCompile Include="utp_hash.cpp" />
|
|
||||||
<ClCompile Include="utp_internal.cpp" />
|
|
||||||
<ClCompile Include="utp_packedsockaddr.cpp" />
|
|
||||||
<ClCompile Include="utp_utils.cpp" />
|
|
||||||
</ItemGroup>
|
|
||||||
<PropertyGroup Label="Globals">
|
|
||||||
<ProjectGuid>{5984D5CD-6ADD-4EB7-82E7-A555888FBBBD}</ProjectGuid>
|
|
||||||
<RootNamespace>libutp2012</RootNamespace>
|
|
||||||
<ProjectName>libutp</ProjectName>
|
|
||||||
</PropertyGroup>
|
|
||||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>true</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v140_xp</PlatformToolset>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|Win32'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>true</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v120_xp</PlatformToolset>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>true</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v120</PlatformToolset>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|x64'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>true</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v120</PlatformToolset>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>false</UseDebugLibraries>
|
|
||||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
<PlatformToolset>v140_xp</PlatformToolset>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|Win32'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>false</UseDebugLibraries>
|
|
||||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
<PlatformToolset>v120_xp</PlatformToolset>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>false</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v120</PlatformToolset>
|
|
||||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|x64'" Label="Configuration">
|
|
||||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
|
||||||
<UseDebugLibraries>false</UseDebugLibraries>
|
|
||||||
<PlatformToolset>v120</PlatformToolset>
|
|
||||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
|
||||||
<CharacterSet>Unicode</CharacterSet>
|
|
||||||
</PropertyGroup>
|
|
||||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
|
||||||
<ImportGroup Label="ExtensionSettings">
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\win32-2012.props" />
|
|
||||||
<Import Project="prop_sheets\debug-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|Win32'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\win32-2012.props" />
|
|
||||||
<Import Project="prop_sheets\debug-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\x64-2012.props" />
|
|
||||||
<Import Project="prop_sheets\debug-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|x64'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\x64-2012.props" />
|
|
||||||
<Import Project="prop_sheets\debug-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\win32-2012.props" />
|
|
||||||
<Import Project="prop_sheets\release-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|Win32'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\win32-2012.props" />
|
|
||||||
<Import Project="prop_sheets\release-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\x64-2012.props" />
|
|
||||||
<Import Project="prop_sheets\release-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|x64'" Label="PropertySheets">
|
|
||||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
|
||||||
<Import Project="prop_sheets\x64-2012.props" />
|
|
||||||
<Import Project="prop_sheets\release-2012.props" />
|
|
||||||
</ImportGroup>
|
|
||||||
<PropertyGroup Label="UserMacros" />
|
|
||||||
<PropertyGroup />
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>Disabled</Optimization>
|
|
||||||
<ExceptionHandling>Sync</ExceptionHandling>
|
|
||||||
<DisableSpecificWarnings>4996</DisableSpecificWarnings>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|Win32'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>Disabled</Optimization>
|
|
||||||
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>Disabled</Optimization>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='WinRTDebug|x64'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>Disabled</Optimization>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>MaxSpeed</Optimization>
|
|
||||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
|
||||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
|
||||||
<PreprocessorDefinitions>_WIN32_WINNT=0x501;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
|
||||||
<ExceptionHandling>Sync</ExceptionHandling>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
|
||||||
<OptimizeReferences>true</OptimizeReferences>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|Win32'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>MaxSpeed</Optimization>
|
|
||||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
|
||||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
|
||||||
<PreprocessorDefinitions>%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
|
||||||
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
|
||||||
<OptimizeReferences>true</OptimizeReferences>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>MaxSpeed</Optimization>
|
|
||||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
|
||||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
|
||||||
<OptimizeReferences>true</OptimizeReferences>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='WinRTRelease|x64'">
|
|
||||||
<ClCompile>
|
|
||||||
<WarningLevel>Level3</WarningLevel>
|
|
||||||
<Optimization>MaxSpeed</Optimization>
|
|
||||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
|
||||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
|
||||||
</ClCompile>
|
|
||||||
<Link>
|
|
||||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
|
||||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
|
||||||
<OptimizeReferences>true</OptimizeReferences>
|
|
||||||
</Link>
|
|
||||||
</ItemDefinitionGroup>
|
|
||||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
|
||||||
<ImportGroup Label="ExtensionTargets">
|
|
||||||
</ImportGroup>
|
|
||||||
</Project>
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user