This commit is contained in:
2018-11-04 15:58:15 +01:00
commit f956bcee28
1178 changed files with 584552 additions and 0 deletions

View File

@@ -0,0 +1,93 @@
package storage
import (
"encoding/binary"
"os"
"path/filepath"
"time"
"github.com/boltdb/bolt"
"github.com/anacrolix/torrent/metainfo"
)
const (
	// Values stored against a piece's key in the completion bucket.
	boltDbCompleteValue   = "c"
	boltDbIncompleteValue = "i"
)

var (
	// Top-level bucket holding per-infohash sub-buckets of piece states.
	completionBucketKey = []byte("completion")
)

// boltPieceCompletion persists piece completion state in a bolt database.
type boltPieceCompletion struct {
	db *bolt.DB
}

// Compile-time check that the interface is satisfied.
var _ PieceCompletion = (*boltPieceCompletion)(nil)
// NewBoltPieceCompletion returns a PieceCompletion that persists piece
// completion state in a bolt database file inside dir. The directory is
// created if it doesn't already exist.
func NewBoltPieceCompletion(dir string) (ret PieceCompletion, err error) {
	// The original ignored MkdirAll's error, deferring the failure to a
	// less useful error from bolt.Open.
	err = os.MkdirAll(dir, 0770)
	if err != nil {
		return
	}
	p := filepath.Join(dir, ".torrent.bolt.db")
	db, err := bolt.Open(p, 0660, &bolt.Options{
		Timeout: time.Second,
	})
	if err != nil {
		return
	}
	// Syncing every transaction is prohibitively slow for per-piece
	// updates; completion data can be regenerated if lost.
	db.NoSync = true
	ret = &boltPieceCompletion{db}
	return
}
// Get reports the stored completion state for pk. cn.Ok is false when no
// record exists for the piece.
func (me boltPieceCompletion) Get(pk metainfo.PieceKey) (cn Completion, err error) {
	err = me.db.View(func(tx *bolt.Tx) error {
		completion := tx.Bucket(completionBucketKey)
		if completion == nil {
			return nil
		}
		pieces := completion.Bucket(pk.InfoHash[:])
		if pieces == nil {
			return nil
		}
		var indexKey [4]byte
		binary.BigEndian.PutUint32(indexKey[:], uint32(pk.Index))
		switch string(pieces.Get(indexKey[:])) {
		case boltDbCompleteValue:
			cn = Completion{Complete: true, Ok: true}
		case boltDbIncompleteValue:
			cn = Completion{Complete: false, Ok: true}
		}
		return nil
	})
	return
}
// Set records the completion state for pk, creating the completion and
// per-infohash buckets on first use.
func (me boltPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
	value := boltDbIncompleteValue
	if b {
		value = boltDbCompleteValue
	}
	return me.db.Update(func(tx *bolt.Tx) error {
		completion, err := tx.CreateBucketIfNotExists(completionBucketKey)
		if err != nil {
			return err
		}
		pieces, err := completion.CreateBucketIfNotExists(pk.InfoHash[:])
		if err != nil {
			return err
		}
		var indexKey [4]byte
		binary.BigEndian.PutUint32(indexKey[:], uint32(pk.Index))
		return pieces.Put(indexKey[:], []byte(value))
	})
}
// Close closes the underlying bolt database.
func (me *boltPieceCompletion) Close() error {
	return me.db.Close()
}

View File

@@ -0,0 +1,101 @@
package storage
import (
"encoding/binary"
"github.com/anacrolix/missinggo/x"
"github.com/boltdb/bolt"
"github.com/anacrolix/torrent/metainfo"
)
// boltDBPiece provides per-piece storage within a shared bolt database.
type boltDBPiece struct {
	db *bolt.DB
	p  metainfo.Piece
	ih metainfo.Hash
	// 20-byte infohash followed by the big-endian uint32 piece index;
	// chunk keys extend this with a uint16 chunk index.
	key [24]byte
}

var (
	_ PieceImpl = (*boltDBPiece)(nil)
	// Bucket holding raw chunk data, keyed by chunkKey.
	dataBucketKey = []byte("data")
)

// pc returns a completion getter/setter sharing this piece's database.
func (me *boltDBPiece) pc() PieceCompletionGetSetter {
	return boltPieceCompletion{me.db}
}

// pk returns the completion key for this piece.
func (me *boltDBPiece) pk() metainfo.PieceKey {
	return metainfo.PieceKey{me.ih, me.p.Index()}
}
// Completion returns the stored completion state, panicking on database
// error (via x.Pie).
func (me *boltDBPiece) Completion() Completion {
	c, err := me.pc().Get(me.pk())
	x.Pie(err)
	return c
}

// MarkComplete records the piece as complete in the completion bucket.
func (me *boltDBPiece) MarkComplete() error {
	return me.pc().Set(me.pk(), true)
}

// MarkNotComplete records the piece as incomplete in the completion bucket.
func (me *boltDBPiece) MarkNotComplete() error {
	return me.pc().Set(me.pk(), false)
}
// ReadAt reads piece data stored in fixed-size chunk values. Reading
// stops early (returning n with a nil error) at the first chunk that is
// missing or not exactly chunkSize long, so callers see a short read
// rather than an error for unwritten regions.
func (me *boltDBPiece) ReadAt(b []byte, off int64) (n int, err error) {
	err = me.db.View(func(tx *bolt.Tx) error {
		db := tx.Bucket(dataBucketKey)
		if db == nil {
			return nil
		}
		ci := off / chunkSize
		off %= chunkSize
		for len(b) != 0 {
			ck := me.chunkKey(int(ci))
			_b := db.Get(ck[:])
			if len(_b) != chunkSize {
				// Chunk absent or short: stop with whatever was copied.
				break
			}
			n1 := copy(b, _b[off:])
			// Only the first chunk is read from a non-zero offset.
			off = 0
			ci++
			b = b[n1:]
			n += n1
		}
		return nil
	})
	return
}
// chunkKey returns the bolt key for the chunk at the given index within
// this piece: the 24-byte piece key followed by a big-endian uint16 index.
func (me *boltDBPiece) chunkKey(index int) (ret [26]byte) {
	prefixLen := copy(ret[:], me.key[:])
	binary.BigEndian.PutUint16(ret[prefixLen:], uint16(index))
	return
}
// WriteAt writes b to the piece data at torrent-chunk granularity,
// read-modify-writing each affected fixed-size chunk item.
func (me *boltDBPiece) WriteAt(b []byte, off int64) (n int, err error) {
	err = me.db.Update(func(tx *bolt.Tx) error {
		db, err := tx.CreateBucketIfNotExists(dataBucketKey)
		if err != nil {
			return err
		}
		ci := off / chunkSize
		off %= chunkSize
		for len(b) != 0 {
			_b := make([]byte, chunkSize)
			ck := me.chunkKey(int(ci))
			// Preserve existing bytes for a partial chunk overwrite.
			copy(_b, db.Get(ck[:]))
			n1 := copy(_b[off:], b)
			// The original ignored Put's error, silently dropping write
			// failures. (It also had a dead `n1 > len(b)` break: copy can
			// never return more than len(b).)
			if err := db.Put(ck[:], _b); err != nil {
				return err
			}
			b = b[n1:]
			off = 0
			ci++
			n += n1
		}
		return nil
	})
	return
}

57
vendor/github.com/anacrolix/torrent/storage/boltdb.go generated vendored Normal file
View File

@@ -0,0 +1,57 @@
package storage
import (
"encoding/binary"
"path/filepath"
"time"
"github.com/anacrolix/missinggo/expect"
"github.com/boltdb/bolt"
"github.com/anacrolix/torrent/metainfo"
)
const (
	// Chosen to match the usual chunk size in a torrent client. This way,
	// most chunk writes are to exactly one full item in bolt DB.
	chunkSize = 1 << 14
)

// boltDBClient is a ClientImpl storing all data in a single bolt database.
type boltDBClient struct {
	db *bolt.DB
}

// boltDBTorrent binds the shared client to a particular torrent's infohash.
type boltDBTorrent struct {
	cl *boltDBClient
	ih metainfo.Hash
}
// NewBoltDB opens (creating if necessary) "bolt.db" inside filePath and
// returns a ClientImpl backed by it. Failure to open panics via expect.Nil.
func NewBoltDB(filePath string) ClientImpl {
	db, err := bolt.Open(filepath.Join(filePath, "bolt.db"), 0600, &bolt.Options{
		Timeout: time.Second,
	})
	expect.Nil(err)
	// Durability is traded for write throughput; torrent data can be
	// re-verified or re-downloaded if lost.
	db.NoSync = true
	return &boltDBClient{db}
}
// Close closes the shared bolt database.
func (me *boltDBClient) Close() error {
	return me.db.Close()
}

// OpenTorrent returns torrent storage scoped to infoHash; the info
// dictionary is not needed by this backend.
func (me *boltDBClient) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
	return &boltDBTorrent{me, infoHash}, nil
}
// Piece returns piece storage for p, with its 24-byte base key
// (20-byte infohash + big-endian uint32 piece index) precomputed.
func (me *boltDBTorrent) Piece(p metainfo.Piece) PieceImpl {
	ret := &boltDBPiece{
		p:  p,
		db: me.cl.db,
		ih: me.ih,
	}
	copy(ret.key[:], me.ih[:])
	binary.BigEndian.PutUint32(ret.key[20:], uint32(p.Index()))
	return ret
}

// Close is a no-op: the database belongs to the client.
func (boltDBTorrent) Close() error { return nil }

View File

@@ -0,0 +1,27 @@
package storage
import (
"log"
"github.com/anacrolix/torrent/metainfo"
)
// PieceCompletionGetSetter reads and writes per-piece completion state.
type PieceCompletionGetSetter interface {
	Get(metainfo.PieceKey) (Completion, error)
	Set(_ metainfo.PieceKey, complete bool) error
}

// Implementations track the completion of pieces. It must be concurrent-safe.
type PieceCompletion interface {
	PieceCompletionGetSetter
	Close() error
}
// pieceCompletionForDir returns a PieceCompletion persisted in a bolt DB
// inside dir, falling back to a non-persistent in-memory implementation
// if the database can't be opened.
func pieceCompletionForDir(dir string) (ret PieceCompletion) {
	pc, err := NewBoltPieceCompletion(dir)
	if err == nil {
		return pc
	}
	log.Printf("couldn't open piece completion db in %q: %s", dir, err)
	return NewMapPieceCompletion()
}

View File

@@ -0,0 +1,37 @@
package storage
import (
"sync"
"github.com/anacrolix/torrent/metainfo"
)
// mapPieceCompletion is an in-memory, mutex-guarded PieceCompletion.
// State is lost when the process exits.
type mapPieceCompletion struct {
	mu sync.Mutex
	m  map[metainfo.PieceKey]bool
}

var _ PieceCompletion = (*mapPieceCompletion)(nil)

// NewMapPieceCompletion returns an empty in-memory PieceCompletion.
func NewMapPieceCompletion() PieceCompletion {
	return &mapPieceCompletion{m: make(map[metainfo.PieceKey]bool)}
}
// Close is a no-op for the in-memory implementation.
func (*mapPieceCompletion) Close() error { return nil }

// Get reports whether pk's completion state has been recorded, and if
// so, whether the piece is complete.
func (me *mapPieceCompletion) Get(pk metainfo.PieceKey) (c Completion, err error) {
	me.mu.Lock()
	c.Complete, c.Ok = me.m[pk]
	me.mu.Unlock()
	return
}

// Set records the completion state for pk, lazily allocating the map so
// the zero value of mapPieceCompletion is usable.
func (me *mapPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
	me.mu.Lock()
	defer me.mu.Unlock()
	if me.m == nil {
		me.m = make(map[metainfo.PieceKey]bool)
	}
	me.m[pk] = b
	return nil
}

2
vendor/github.com/anacrolix/torrent/storage/doc.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package storage implements storage backends for package torrent.
package storage

219
vendor/github.com/anacrolix/torrent/storage/file.go generated vendored Normal file
View File

@@ -0,0 +1,219 @@
package storage
import (
"io"
"os"
"path/filepath"
"github.com/anacrolix/missinggo"
"github.com/anacrolix/torrent/metainfo"
)
// File-based storage for torrents, that isn't yet bound to a particular
// torrent.
type fileClientImpl struct {
	baseDir   string
	pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string
	pc        PieceCompletion
}

// The default path maker returns baseDir unchanged: all torrents share
// one directory tree.
func defaultPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
	return baseDir
}

// infoHashPathMaker partitions torrent data into per-infohash
// subdirectories of baseDir.
func infoHashPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
	return filepath.Join(baseDir, infoHash.HexString())
}
// All Torrent data stored in this baseDir.
func NewFile(baseDir string) ClientImpl {
	return NewFileWithCompletion(baseDir, pieceCompletionForDir(baseDir))
}

// NewFileWithCompletion is like NewFile but with an explicit piece
// completion backend.
func NewFileWithCompletion(baseDir string, completion PieceCompletion) ClientImpl {
	return newFileWithCustomPathMakerAndCompletion(baseDir, nil, completion)
}

// File storage with data partitioned by infohash.
func NewFileByInfoHash(baseDir string) ClientImpl {
	return NewFileWithCustomPathMaker(baseDir, infoHashPathMaker)
}

// Allows passing a function to determine the path for storing torrent data.
func NewFileWithCustomPathMaker(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string) ClientImpl {
	return newFileWithCustomPathMakerAndCompletion(baseDir, pathMaker, pieceCompletionForDir(baseDir))
}

// newFileWithCustomPathMakerAndCompletion is the common constructor; a
// nil pathMaker falls back to defaultPathMaker.
func newFileWithCustomPathMakerAndCompletion(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string, completion PieceCompletion) ClientImpl {
	if pathMaker == nil {
		pathMaker = defaultPathMaker
	}
	return &fileClientImpl{
		baseDir:   baseDir,
		pathMaker: pathMaker,
		pc:        completion,
	}
}
// Close releases the piece completion backend.
func (me *fileClientImpl) Close() error {
	return me.pc.Close()
}

// OpenTorrent prepares file-backed storage for a torrent, creating any
// zero-length files up front since they have no pieces to trigger writes.
func (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
	dir := fs.pathMaker(fs.baseDir, info, infoHash)
	err := CreateNativeZeroLengthFiles(info, dir)
	if err != nil {
		return nil, err
	}
	return &fileTorrentImpl{
		dir,
		info,
		infoHash,
		fs.pc,
	}, nil
}
// fileTorrentImpl is file-backed storage for a single torrent.
type fileTorrentImpl struct {
	dir        string
	info       *metainfo.Info
	infoHash   metainfo.Hash
	completion PieceCompletion
}

func (fts *fileTorrentImpl) Piece(p metainfo.Piece) PieceImpl {
	// Create a view onto the file-based torrent storage.
	_io := fileTorrentImplIO{fts}
	// Return the appropriate segments of this.
	return &filePieceImpl{
		fts,
		p,
		missinggo.NewSectionWriter(_io, p.Offset(), p.Length()),
		io.NewSectionReader(_io, p.Offset(), p.Length()),
	}
}

// Close is a no-op; the completion backend is closed by the client.
func (fs *fileTorrentImpl) Close() error {
	return nil
}
// Creates native files for any zero-length file entries in the info. This is
// a helper for file-based storages, which don't address or write to zero-
// length files because they have no corresponding pieces.
func CreateNativeZeroLengthFiles(info *metainfo.Info, dir string) (err error) {
	for _, fi := range info.UpvertedFiles() {
		if fi.Length != 0 {
			continue
		}
		name := filepath.Join(append([]string{dir, info.Name}, fi.Path...)...)
		// The original ignored MkdirAll's error, deferring the failure to
		// a less useful error from os.Create.
		err = os.MkdirAll(filepath.Dir(name), 0777)
		if err != nil {
			break
		}
		var f io.Closer
		f, err = os.Create(name)
		if err != nil {
			break
		}
		f.Close()
	}
	return
}
// Exposes file-based storage of a torrent, as one big ReadWriterAt.
type fileTorrentImplIO struct {
	fts *fileTorrentImpl
}

// Returns EOF on short or missing file.
func (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {
	f, err := os.Open(fst.fts.fileInfoName(fi))
	if os.IsNotExist(err) {
		// File missing is treated the same as a short file.
		err = io.EOF
		return
	}
	if err != nil {
		return
	}
	defer f.Close()
	// Limit the read to within the expected bounds of this file.
	if int64(len(b)) > fi.Length-off {
		b = b[:fi.Length-off]
	}
	for off < fi.Length && len(b) != 0 {
		n1, err1 := f.ReadAt(b, off)
		b = b[n1:]
		n += n1
		off += int64(n1)
		if n1 == 0 {
			// No progress: surface whatever error ReadAt reported
			// (io.EOF for a short file).
			err = err1
			break
		}
	}
	return
}
// Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.
func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
	for _, fi := range fst.fts.info.UpvertedFiles() {
		for off < fi.Length {
			n1, err1 := fst.readFileAt(fi, b, off)
			n += n1
			off += int64(n1)
			b = b[n1:]
			if len(b) == 0 {
				// Got what we need.
				return
			}
			if n1 != 0 {
				// Made progress.
				continue
			}
			err = err1
			if err == io.EOF {
				// Lies: a file ended early, but the torrent continues.
				err = io.ErrUnexpectedEOF
			}
			return
		}
		// Advance into the next file's coordinate space.
		off -= fi.Length
	}
	err = io.EOF
	return
}
// WriteAt writes p across the torrent's constituent files starting at
// torrent-offset off, creating files (and parent directories) on demand.
// Writes to each file are clamped to that file's length.
func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
	for _, fi := range fst.fts.info.UpvertedFiles() {
		if off >= fi.Length {
			off -= fi.Length
			continue
		}
		// Clamp this write to the remainder of the current file.
		n1 := len(p)
		if int64(n1) > fi.Length-off {
			n1 = int(fi.Length - off)
		}
		name := fst.fts.fileInfoName(fi)
		// The original ignored MkdirAll's error, deferring the failure to
		// a less useful error from OpenFile.
		if err = os.MkdirAll(filepath.Dir(name), 0777); err != nil {
			return
		}
		var f *os.File
		f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0666)
		if err != nil {
			return
		}
		n1, err = f.WriteAt(p[:n1], off)
		// TODO: On some systems, write errors can be delayed until the Close.
		f.Close()
		if err != nil {
			return
		}
		n += n1
		off = 0
		p = p[n1:]
		if len(p) == 0 {
			break
		}
	}
	return
}
// fileInfoName returns the native path for fi within this torrent's
// storage directory: dir/name/path...
func (fts *fileTorrentImpl) fileInfoName(fi metainfo.FileInfo) string {
	parts := make([]string, 0, 2+len(fi.Path))
	parts = append(parts, fts.dir, fts.info.Name)
	parts = append(parts, fi.Path...)
	return filepath.Join(parts...)
}

View File

@@ -0,0 +1,29 @@
package storage
import "github.com/anacrolix/torrent/metainfo"
// extentCompleteRequiredLengths returns, for the extent [off, off+n) of
// the torrent's concatenated data, the minimum length each constituent
// file must have for the extent to be fully present on disk. Each
// returned FileInfo carries the file's Path and the required Length
// (off+n1 in that file's coordinates). Panics if the extent extends
// beyond the torrent's total length.
func extentCompleteRequiredLengths(info *metainfo.Info, off, n int64) (ret []metainfo.FileInfo) {
	if n == 0 {
		return
	}
	for _, fi := range info.UpvertedFiles() {
		if off >= fi.Length {
			// Extent starts after this file; shift into the next file's
			// coordinate space.
			off -= fi.Length
			continue
		}
		n1 := n
		if off+n1 > fi.Length {
			// Extent continues past this file's end.
			n1 = fi.Length - off
		}
		ret = append(ret, metainfo.FileInfo{
			Path:   fi.Path,
			Length: off + n1,
		})
		n -= n1
		if n == 0 {
			return
		}
		off = 0
	}
	panic("extent exceeds torrent bounds")
}

View File

@@ -0,0 +1,53 @@
package storage
import (
"io"
"log"
"os"
"github.com/anacrolix/torrent/metainfo"
)
// filePieceImpl is a single piece's view over file-backed torrent
// storage, delegating I/O to the embedded section writer/reader.
type filePieceImpl struct {
	*fileTorrentImpl
	p metainfo.Piece
	io.WriterAt
	io.ReaderAt
}

var _ PieceImpl = (*filePieceImpl)(nil)

// pieceKey returns the completion key for this piece.
func (me *filePieceImpl) pieceKey() metainfo.PieceKey {
	return metainfo.PieceKey{me.infoHash, me.p.Index()}
}
// Completion returns the recorded completion state, verified against the
// on-disk file lengths; a stale "complete" record is corrected to false
// and written back.
func (fs *filePieceImpl) Completion() Completion {
	c, err := fs.completion.Get(fs.pieceKey())
	if err != nil {
		log.Printf("error getting piece completion: %s", err)
		c.Ok = false
		return c
	}
	// If it's allegedly complete, check that its constituent files have the
	// necessary length.
	for _, fi := range extentCompleteRequiredLengths(fs.p.Info, fs.p.Offset(), fs.p.Length()) {
		s, err := os.Stat(fs.fileInfoName(fi))
		if err != nil || s.Size() < fi.Length {
			c.Complete = false
			break
		}
	}
	if !c.Complete {
		// The completion was wrong, fix it.
		fs.completion.Set(fs.pieceKey(), false)
	}
	return c
}
// MarkComplete records the piece as complete.
func (fs *filePieceImpl) MarkComplete() error {
	return fs.completion.Set(fs.pieceKey(), true)
}

// MarkNotComplete records the piece as incomplete.
func (fs *filePieceImpl) MarkNotComplete() error {
	return fs.completion.Set(fs.pieceKey(), false)
}

View File

@@ -0,0 +1,40 @@
package storage
import (
"io"
"github.com/anacrolix/torrent/metainfo"
)
// Represents data storage for an unspecified torrent.
type ClientImpl interface {
	OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error)
	Close() error
}

// Data storage bound to a torrent.
type TorrentImpl interface {
	Piece(metainfo.Piece) PieceImpl
	Close() error
}

// Interacts with torrent piece data.
type PieceImpl interface {
	// These interfaces are not as strict as normally required. They can
	// assume that the parameters are appropriate for the dimensions of the
	// piece.
	io.ReaderAt
	io.WriterAt
	// Called when the client believes the piece data will pass a hash check.
	// The storage can move or mark the piece data as read-only as it sees
	// fit.
	MarkComplete() error
	MarkNotComplete() error
	// Returns the piece's completion state. (Not just a bool: Ok reports
	// whether the state is known at all.)
	Completion() Completion
}

// Completion describes whether a piece is complete, and whether that
// state is actually known (Ok).
type Completion struct {
	Complete bool
	Ok       bool
}

161
vendor/github.com/anacrolix/torrent/storage/mmap.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
package storage
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"github.com/anacrolix/missinggo"
"github.com/edsrzf/mmap-go"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/mmap_span"
)
// mmapClientImpl stores torrent data in memory-mapped files under baseDir.
type mmapClientImpl struct {
	baseDir string
	pc      PieceCompletion
}

// NewMMap returns mmap-backed storage with piece completion persisted in
// baseDir.
func NewMMap(baseDir string) ClientImpl {
	return NewMMapWithCompletion(baseDir, pieceCompletionForDir(baseDir))
}

// NewMMapWithCompletion is like NewMMap with an explicit piece
// completion backend.
func NewMMapWithCompletion(baseDir string, completion PieceCompletion) ClientImpl {
	return &mmapClientImpl{
		baseDir: baseDir,
		pc:      completion,
	}
}
// OpenTorrent mmaps the torrent's files under the base directory and
// returns storage backed by the mapped span.
func (s *mmapClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (t TorrentImpl, err error) {
	span, err := mMapTorrent(info, s.baseDir)
	if err != nil {
		// The original still returned a TorrentImpl wrapping the span
		// alongside the error, even though mMapTorrent closes the span
		// on failure.
		return nil, err
	}
	t = &mmapTorrentStorage{
		infoHash: infoHash,
		span:     span,
		pc:       s.pc,
	}
	return
}

// Close releases the piece completion backend.
func (s *mmapClientImpl) Close() error {
	return s.pc.Close()
}
// mmapTorrentStorage is mmap-backed storage for a single torrent.
type mmapTorrentStorage struct {
	infoHash metainfo.Hash
	span     *mmap_span.MMapSpan
	pc       PieceCompletion
}

// Piece returns a view of the piece backed by a section of the mapped span.
func (ts *mmapTorrentStorage) Piece(p metainfo.Piece) PieceImpl {
	return mmapStoragePiece{
		pc:       ts.pc,
		p:        p,
		ih:       ts.infoHash,
		ReaderAt: io.NewSectionReader(ts.span, p.Offset(), p.Length()),
		WriterAt: missinggo.NewSectionWriter(ts.span, p.Offset(), p.Length()),
	}
}

// Close closes both the piece completion backend and the mapped span.
// Both are always closed; the completion backend's error takes
// precedence. (The original dropped the completion backend's error.)
func (ts *mmapTorrentStorage) Close() error {
	pcErr := ts.pc.Close()
	spanErr := ts.span.Close()
	if pcErr != nil {
		return pcErr
	}
	return spanErr
}
// mmapStoragePiece is a single piece's view over a mapped span.
type mmapStoragePiece struct {
	pc PieceCompletion
	p  metainfo.Piece
	ih metainfo.Hash
	io.ReaderAt
	io.WriterAt
}

// pieceKey returns the completion key for this piece.
func (me mmapStoragePiece) pieceKey() metainfo.PieceKey {
	return metainfo.PieceKey{me.ih, me.p.Index()}
}

// Completion returns the stored completion state; lookup errors are
// treated as unknown (zero Completion).
func (sp mmapStoragePiece) Completion() Completion {
	c, _ := sp.pc.Get(sp.pieceKey())
	return c
}

// MarkComplete records the piece as complete. The original swallowed the
// Set error and unconditionally returned nil.
func (sp mmapStoragePiece) MarkComplete() error {
	return sp.pc.Set(sp.pieceKey(), true)
}

// MarkNotComplete records the piece as incomplete, propagating the Set
// error (previously discarded).
func (sp mmapStoragePiece) MarkNotComplete() error {
	return sp.pc.Set(sp.pieceKey(), false)
}
// mMapTorrent maps every file in the torrent, in order, into a single
// MMapSpan rooted at location. On any failure the partially-built span
// is closed before returning.
func mMapTorrent(md *metainfo.Info, location string) (mms *mmap_span.MMapSpan, err error) {
	mms = &mmap_span.MMapSpan{}
	defer func() {
		if err != nil {
			mms.Close()
		}
	}()
	for _, miFile := range md.UpvertedFiles() {
		fileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)
		var mm mmap.MMap
		mm, err = mmapFile(fileName, miFile.Length)
		if err != nil {
			err = fmt.Errorf("file %q: %s", miFile.DisplayPath(md), err)
			return
		}
		// mm is nil for zero-length files, which can't be mapped.
		if mm != nil {
			mms.Append(mm)
		}
	}
	return
}
// mmapFile creates (if needed), grows to size, and memory-maps the named
// file read-write. Returns a nil mapping (with nil error) for size 0,
// since zero-length regions can't be mapped.
func mmapFile(name string, size int64) (ret mmap.MMap, err error) {
	dir := filepath.Dir(name)
	err = os.MkdirAll(dir, 0777)
	if err != nil {
		err = fmt.Errorf("making directory %q: %s", dir, err)
		return
	}
	var file *os.File
	file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return
	}
	// The mapping outlives the descriptor, so closing it here is safe.
	defer file.Close()
	var fi os.FileInfo
	fi, err = file.Stat()
	if err != nil {
		return
	}
	if fi.Size() < size {
		// I think this is necessary on HFS+. Maybe Linux will SIGBUS too if
		// you overmap a file but I'm not sure.
		err = file.Truncate(size)
		if err != nil {
			return
		}
	}
	if size == 0 {
		// Can't mmap() regions with length 0.
		return
	}
	intLen := int(size)
	if int64(intLen) != size {
		err = errors.New("size too large for system")
		return
	}
	ret, err = mmap.MapRegion(file, intLen, mmap.RDWR, 0, 0)
	if err != nil {
		err = fmt.Errorf("error mapping region: %s", err)
		return
	}
	if int64(len(ret)) != size {
		// A mapping of the wrong length indicates a programmer error.
		panic(len(ret))
	}
	return
}

View File

@@ -0,0 +1,77 @@
package storage
import (
"path"
"github.com/anacrolix/missinggo/resource"
"github.com/anacrolix/torrent/metainfo"
)
// piecePerResource stores each piece as its own resource.Instance,
// under "incomplete/" while downloading and "completed/" afterwards.
type piecePerResource struct {
	p resource.Provider
}

// NewResourcePieces returns piece-per-resource storage backed by p.
func NewResourcePieces(p resource.Provider) ClientImpl {
	return &piecePerResource{
		p: p,
	}
}

// OpenTorrent returns the client itself: resources are keyed by piece
// hash, so no per-torrent state is needed.
func (s *piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
	return s, nil
}

// Close is a no-op; the resource provider's lifetime is the caller's
// concern.
func (s *piecePerResource) Close() error {
	return nil
}
// Piece returns storage for p backed by two resource instances: one for
// the completed copy and one for in-progress data. Instance creation
// failures are treated as programmer error (panic).
func (s *piecePerResource) Piece(p metainfo.Piece) PieceImpl {
	completed, err := s.p.NewInstance(path.Join("completed", p.Hash().HexString()))
	if err != nil {
		panic(err)
	}
	incomplete, err := s.p.NewInstance(path.Join("incomplete", p.Hash().HexString()))
	if err != nil {
		panic(err)
	}
	return piecePerResourcePiece{
		p: p,
		c: completed,
		i: incomplete,
	}
}
// piecePerResourcePiece is a single piece stored as a pair of resources.
type piecePerResourcePiece struct {
	p metainfo.Piece
	c resource.Instance // completed copy
	i resource.Instance // incomplete (in-progress) copy
}

// Completion deems the piece complete iff the completed resource exists
// with exactly the piece's length. Ok is always true: existence itself
// is the record.
func (s piecePerResourcePiece) Completion() Completion {
	fi, err := s.c.Stat()
	return Completion{
		Complete: err == nil && fi.Size() == s.p.Length(),
		Ok:       true,
	}
}
// MarkComplete promotes the incomplete copy to the completed location.
func (s piecePerResourcePiece) MarkComplete() error {
	return resource.Move(s.i, s.c)
}

// MarkNotComplete discards the completed copy.
func (s piecePerResourcePiece) MarkNotComplete() error {
	return s.c.Delete()
}

// ReadAt reads from whichever copy is current: the completed resource if
// the piece is complete, otherwise the incomplete one.
func (s piecePerResourcePiece) ReadAt(b []byte, off int64) (int, error) {
	if s.Completion().Complete {
		return s.c.ReadAt(b, off)
	} else {
		return s.i.ReadAt(b, off)
	}
}

// Writes always target the incomplete copy.
func (s piecePerResourcePiece) WriteAt(b []byte, off int64) (n int, err error) {
	return s.i.WriteAt(b, off)
}

View File

@@ -0,0 +1,56 @@
// +build cgo
package storage
import (
"database/sql"
"path/filepath"
_ "github.com/mattn/go-sqlite3"
"github.com/anacrolix/torrent/metainfo"
)
// sqlitePieceCompletion persists piece completion state in a SQLite
// database (the mattn/go-sqlite3 driver requires cgo; see build tag).
type sqlitePieceCompletion struct {
	db *sql.DB
}

var _ PieceCompletion = (*sqlitePieceCompletion)(nil)
// NewSqlitePieceCompletion opens (creating if necessary) ".torrent.db"
// in dir and ensures the piece_completion table exists.
func NewSqlitePieceCompletion(dir string) (ret *sqlitePieceCompletion, err error) {
	p := filepath.Join(dir, ".torrent.db")
	db, err := sql.Open("sqlite3", p)
	if err != nil {
		return
	}
	// SQLite handles a single writer best; serialize all access through
	// one connection.
	db.SetMaxOpenConns(1)
	// Best-effort tuning; errors from the PRAGMAs are deliberately ignored.
	db.Exec(`PRAGMA journal_mode=WAL`)
	db.Exec(`PRAGMA synchronous=1`)
	_, err = db.Exec(`create table if not exists piece_completion(infohash, "index", complete, unique(infohash, "index"))`)
	if err != nil {
		db.Close()
		return
	}
	ret = &sqlitePieceCompletion{db}
	return
}
// Get looks up the stored completion flag for pk. A missing row yields
// Ok == false with a nil error.
func (me *sqlitePieceCompletion) Get(pk metainfo.PieceKey) (c Completion, err error) {
	row := me.db.QueryRow(
		`select complete from piece_completion where infohash=? and "index"=?`,
		pk.InfoHash.HexString(), pk.Index)
	switch err = row.Scan(&c.Complete); err {
	case nil:
		c.Ok = true
	case sql.ErrNoRows:
		// No record for this piece; report unknown rather than an error.
		err = nil
	}
	return
}
// Set upserts the completion flag for pk.
func (me *sqlitePieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
	_, err := me.db.Exec(`insert or replace into piece_completion(infohash, "index", complete) values(?, ?, ?)`, pk.InfoHash.HexString(), pk.Index, b)
	return err
}

// Close closes the underlying database.
func (me *sqlitePieceCompletion) Close() error {
	return me.db.Close()
}

View File

@@ -0,0 +1,86 @@
package storage
import (
"io"
"os"
"github.com/anacrolix/missinggo"
"github.com/anacrolix/torrent/metainfo"
)
// Client wraps a ClientImpl, adding bounds-checked piece access.
type Client struct {
	ci ClientImpl
}

// NewClient wraps cl.
func NewClient(cl ClientImpl) *Client {
	return &Client{cl}
}

// OpenTorrent wraps the implementation's torrent storage. Note the
// wrapper is returned even when err is non-nil; callers must check err.
func (cl Client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (*Torrent, error) {
	t, err := cl.ci.OpenTorrent(info, infoHash)
	return &Torrent{t}, err
}
// Torrent wraps a TorrentImpl so pieces come paired with their metainfo,
// enabling the bounds checks in Piece's ReadAt/WriteAt.
type Torrent struct {
	TorrentImpl
}

// Piece returns the wrapped piece for p.
func (t Torrent) Piece(p metainfo.Piece) Piece {
	return Piece{t.TorrentImpl.Piece(p), p}
}

// Piece pairs a PieceImpl with its metainfo for dimension checks.
type Piece struct {
	PieceImpl
	mip metainfo.Piece
}
// WriteAt clamps the write to the piece's length and panics if the write
// would extend past the piece's end.
func (p Piece) WriteAt(b []byte, off int64) (n int, err error) {
	// Callers should not be writing to completed pieces, but it's too
	// expensive to be checking this on every single write using uncached
	// completions.
	// c := p.Completion()
	// if c.Ok && c.Complete {
	// err = errors.New("piece already completed")
	// return
	// }
	if off+int64(len(b)) > p.mip.Length() {
		panic("write overflows piece")
	}
	b = missinggo.LimitLen(b, p.mip.Length()-off)
	return p.PieceImpl.WriteAt(b, off)
}
// ReadAt reads within the piece's bounds, normalizing the underlying
// implementation's EOF behavior: EOF is only returned at the exact end
// of the piece, a short read inside the piece becomes ErrUnexpectedEOF,
// and any mid-piece error also clears the piece's completion record.
func (p Piece) ReadAt(b []byte, off int64) (n int, err error) {
	if off < 0 {
		err = os.ErrInvalid
		return
	}
	if off >= p.mip.Length() {
		err = io.EOF
		return
	}
	b = missinggo.LimitLen(b, p.mip.Length()-off)
	if len(b) == 0 {
		return
	}
	n, err = p.PieceImpl.ReadAt(b, off)
	if n > len(b) {
		// Implementations must not report more bytes than requested.
		panic(n)
	}
	off += int64(n)
	if err == io.EOF && off < p.mip.Length() {
		err = io.ErrUnexpectedEOF
	}
	if err == nil && off >= p.mip.Length() {
		err = io.EOF
	}
	if n == 0 && err == nil {
		err = io.ErrUnexpectedEOF
	}
	if off < p.mip.Length() && err != nil {
		// A failed read inside the piece means the stored data is suspect.
		p.MarkNotComplete()
	}
	return
}