Diffstat (limited to 'src/System/Torrent/Storage.hs')
-rw-r--r--  src/System/Torrent/Storage.hs | 135
1 file changed, 111 insertions, 24 deletions
diff --git a/src/System/Torrent/Storage.hs b/src/System/Torrent/Storage.hs
index a5529fe6..cb0494e8 100644
--- a/src/System/Torrent/Storage.hs
+++ b/src/System/Torrent/Storage.hs
@@ -20,29 +20,33 @@
 {-# LANGUAGE RecordWildCards #-}
 module System.Torrent.Storage
   ( Storage
+  , ppStorage
 
     -- * Construction
   , bindTo, unbind, withStorage
 
     -- * Modification
-  , getBlk, putBlk
+  , getBlk, putBlk, selBlk
   ) where
 
 import Control.Applicative
 import Control.Concurrent.STM
 import Control.Exception
 import Control.Monad
+import Control.Monad.Trans
+
 import Data.ByteString as B
 import qualified Data.ByteString.Lazy as Lazy
 import Data.List as L
+import Text.PrettyPrint
 import System.FilePath
 import System.Directory
 
-import Data.Bitfield
+import Data.Bitfield as BF
 import Data.Torrent
 import Network.BitTorrent.Exchange.Protocol
 import Network.BitTorrent.Internal
-import System.IO.MMap.Fixed
+import System.IO.MMap.Fixed as Fixed
 
 
 data Storage = Storage {
@@ -51,14 +55,21 @@ data Storage = Storage {
 
     -- |
   , blocks :: !(TVar Bitfield)
+    -- TODO use bytestring for fast serialization
+    -- because we need to write this bitmap to disc periodically
+
+
+  , blockSize :: !Int
 
     -- | Used to map linear block addresses to disjoint
     -- mallocated/mmaped adresses.
   , payload :: !Fixed
   }
 
-pieceSize :: Storage -> Int
-pieceSize = ciPieceLength . tInfo . torrentMeta . session
+ppStorage :: Storage -> IO Doc
+ppStorage Storage {..} = pp <$> readTVarIO blocks
+  where
+    pp bf = int blockSize
 
 {-----------------------------------------------------------------------
     Construction
@@ -67,9 +78,15 @@ pieceSize = ciPieceLength . tInfo . torrentMeta . session
 -- TODO doc args
 bindTo :: SwarmSession -> FilePath -> IO Storage
 bindTo se @ SwarmSession {..} contentPath = do
-  let content_paths = contentLayout contentPath (tInfo torrentMeta)
+  let contentInfo   = tInfo torrentMeta
+  let content_paths = contentLayout contentPath contentInfo
   mapM_ mkDir (L.map fst content_paths)
-  Storage se <$> newTVarIO (haveNone (ciPieceLength (tInfo torrentMeta)))
+
+  let pieceLen = pieceLength se
+  let blockSize = min defaultBlockSize pieceLen
+  print $ "content length " ++ show (contentLength contentInfo)
+  Storage se <$> newTVarIO (haveNone (blockCount blockSize contentInfo))
+                 <*> pure blockSize
                  <*> coalesceFiles content_paths
   where
     mkDir path = do
@@ -90,43 +107,113 @@ withStorage se path = bracket (se `bindTo` path) unbind
 -----------------------------------------------------------------------}
 
 -- TODO to avoid races we might need to try Control.Concurrent.yield
--- TODO lazy block payload
+-- TODO make block_payload :: Lazy.ByteString
+
+selBlk :: MonadIO m => PieceIx -> Storage -> m [BlockIx]
+selBlk pix st @ Storage {..} = liftIO $ atomically $ do
+  mask <- pieceMask pix st
+  select mask <$> readTVar blocks
+  where
+    select mask = fmap mkBix . toList . difference mask
+    -- TODO clip upper bound of block index
+    mkBix ix = BlockIx pix (blockSize * (ix - offset)) blockSize
+
+    offset = coeff * pix
+    coeff  = pieceLength session `div` blockSize
+
+--
+-- TODO make global lock map -- otherwise we might get broken pieces
+--
+-- imagine the following situation:
+--
+--   thread1: write
+--   thread1: mark
+--
+-- this let us avoid races as well
+--
 
 -- | Write a block to the storage. If block out of range then block is clipped.
-putBlk :: Block -> Storage -> IO ()
-putBlk blk @ Block {..} st @ Storage {..} = do
+--
+--
+--
+putBlk :: MonadIO m => Block -> Storage -> m Bool
+putBlk blk @ Block {..} st @ Storage {..} = liftIO $ do
 -- let blkIx = undefined
 -- bm <- readTVarIO blocks
 -- unless (member blkIx bm) $ do
-  writeBytes (blkInterval (pieceSize st) blk)
+  writeBytes (blkInterval (pieceLength session) blk)
              (Lazy.fromChunks [blkData])
              payload
--- when (undefined bm blkIx) $ do
---   if checkPiece ci piIx piece
---     then return True
---     else do
---       reset
---       return False
+
+  markBlock blk st
+  validatePiece blkPiece st
+
+markBlock :: Block -> Storage -> IO ()
+markBlock Block {..} Storage {..} = do
+  let piLen = pieceLength session
+  let glIx  = (piLen `div` blockSize) * blkPiece + (blkOffset `div` blockSize)
+  atomically $ modifyTVar' blocks (have glIx)
 
 -- | Read a block by given block index. If lower or upper bound out of
 -- range then index is clipped.
-getBlk :: BlockIx -> Storage -> IO Block
-getBlk ix @ BlockIx {..} st @ Storage {..} = do
-  bs <- readBytes (ixInterval (pieceSize st) ix) payload
+--
+-- Do not block.
+--
+getBlk :: MonadIO m => BlockIx -> Storage -> m Block
+getBlk ix @ BlockIx {..} st @ Storage {..} = liftIO $ do
+  -- TODO check if __piece__ is available
+  bs <- readBytes (ixInterval (pieceLength session) ix) payload
   return $ Block ixPiece ixOffset (Lazy.toStrict bs)
 
--- | Should be used to verify piece.
 getPiece :: PieceIx -> Storage -> IO ByteString
-getPiece ix st = blkData <$> getBlk (BlockIx ix 0 (pieceSize st)) st
+getPiece pix st @ Storage {..} = do
+  let pieceLen = pieceLength session
+  let bix      = BlockIx pix 0 (pieceLength session)
+  bs <- readBytes (ixInterval pieceLen bix) payload
+  return (Lazy.toStrict bs)
+
+resetPiece :: PieceIx -> Storage -> IO ()
+resetPiece pix st @ Storage {..} = atomically $ do
+  mask <- pieceMask pix st
+  modifyTVar' blocks (`difference` mask)
+
+validatePiece :: PieceIx -> Storage -> IO Bool
+validatePiece pix st @ Storage {..} = do
+  downloaded <- atomically $ isDownloaded pix st
+  if not downloaded then return False
+    else do
+      print $ show pix ++ " downloaded"
+      piece <- getPiece pix st
+      if checkPiece (tInfo (torrentMeta session)) pix piece
+        then return True
+        else do
+          print $ "----------------------------- invalid " ++ show pix
+--          resetPiece pix st
+          return True
 
 {-----------------------------------------------------------------------
     Internal
 -----------------------------------------------------------------------}
 
+isDownloaded :: PieceIx -> Storage -> STM Bool
+isDownloaded pix st @ Storage {..} = do
+  bf   <- readTVar blocks
+  mask <- pieceMask pix st
+  return $ intersection mask bf == mask
+
+pieceMask :: PieceIx -> Storage -> STM Bitfield
+pieceMask pix Storage {..} = do
+  bf <- readTVar blocks
+  return $ BF.interval (totalCount bf) offset (offset + coeff - 1)
+  where
+    offset = coeff * pix
+    coeff  = pieceLength session `div` blockSize
+
+
 ixInterval :: Int -> BlockIx -> FixedInterval
 ixInterval pieceSize BlockIx {..} =
-  interval (ixPiece * pieceSize + ixOffset) ixLength
+  Fixed.interval (ixPiece * pieceSize + ixOffset) ixLength
 
 blkInterval :: Int -> Block -> FixedInterval
 blkInterval pieceSize Block {..} =
-  interval (blkPiece * pieceSize + blkOffset) (B.length blkData)
\ No newline at end of file
+  Fixed.interval (blkPiece * pieceSize + blkOffset) (B.length blkData)
\ No newline at end of file
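
The diff above reworks the Storage API around per-block bookkeeping (selBlk / putBlk / getBlk plus the internal markBlock / validatePiece helpers). As a minimal usage sketch, not part of this commit: the driver below assumes Block, BlockIx and PieceIx can be imported from Network.BitTorrent.Exchange.Protocol and SwarmSession from Network.BitTorrent.Internal (the same modules this file imports), and it takes a hypothetical peer-fetch callback in place of real wire traffic.

import Control.Monad (forM_)

import Network.BitTorrent.Exchange.Protocol (Block, BlockIx, PieceIx)
import Network.BitTorrent.Internal          (SwarmSession)
import System.Torrent.Storage

-- Hypothetical driver: pull in every block the bitfield still marks as
-- missing for one piece, writing each one back through the Storage API.
downloadPiece :: (BlockIx -> IO Block)  -- hypothetical "ask a peer" callback
              -> SwarmSession -> FilePath -> PieceIx -> IO ()
downloadPiece fetch sw contentDir pix =
  withStorage sw contentDir $ \st -> do
    missing <- selBlk pix st      -- block indices not yet marked as downloaded
    forM_ missing $ \bix -> do
      blk <- fetch bix            -- obtain the block payload from a peer
      ok  <- putBlk blk st        -- write it, mark it, revalidate the piece
      putStrLn $ show bix ++ (if ok then ": piece complete" else ": stored")

Note that putBlk now returns the result of validatePiece, so the Bool above reports whether the whole piece hashed correctly, not whether the single write succeeded.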