Diffstat (limited to 'src')
-rw-r--r-- | src/Data/Bitfield.hs | 359
-rw-r--r-- | src/Data/Torrent.hs | 511
-rw-r--r-- | src/Network/BitTorrent.hs | 4
-rw-r--r-- | src/Network/BitTorrent/DHT/Protocol.hs | 2
-rw-r--r-- | src/Network/BitTorrent/Exchange.hs | 3
-rw-r--r-- | src/Network/BitTorrent/Exchange/Protocol.hs | 152
-rw-r--r-- | src/Network/BitTorrent/Sessions.hs | 4
-rw-r--r-- | src/Network/BitTorrent/Sessions/Types.lhs | 6
-rw-r--r-- | src/Network/BitTorrent/Tracker.hs | 2
-rw-r--r-- | src/Network/BitTorrent/Tracker/HTTP.hs | 2
-rw-r--r-- | src/Network/BitTorrent/Tracker/Protocol.hs | 2
-rw-r--r-- | src/Network/BitTorrent/Tracker/UDP.hs | 2
-rw-r--r-- | src/System/IO/MMap/Fixed.hs | 212
-rw-r--r-- | src/System/Torrent/Storage.hs | 332
14 files changed, 32 insertions, 1561 deletions
diff --git a/src/Data/Bitfield.hs b/src/Data/Bitfield.hs
deleted file mode 100644
index acfca0d0..00000000
--- a/src/Data/Bitfield.hs
+++ /dev/null
@@ -1,359 +0,0 @@
1 | -- | | ||
2 | -- Copyright : (c) Sam T. 2013 | ||
3 | -- License : MIT | ||
4 | -- Maintainer : pxqr.sta@gmail.com | ||
5 | -- Stability : experimental | ||
6 | -- Portability : portable | ||
7 | -- | ||
8 | -- This module provides all necessary machinery to work with | ||
9 | -- bitfields. Bitfields are used to keep track of the indices of | ||
10 | -- complete pieces that either a peer or the client has. | ||
11 | -- | ||
12 | -- There are also commonly used piece selection algorithms | ||
13 | -- which are used to find out which piece to download next. | ||
14 | -- Selectors are intended to be used in the following order: | ||
15 | -- | ||
16 | -- * Random first - at the start. | ||
17 | -- | ||
18 | -- * Rarest first selection - performed to avoid the situation when | ||
19 | -- the rarest piece becomes inaccessible. | ||
20 | -- | ||
21 | -- * _End game_ selection - performed after a peer has requested all | ||
22 | -- the subpieces of the content. | ||
23 | -- | ||
24 | -- Note that BitTorrent applies a strict priority policy for | ||
25 | -- /subpiece/ (block) selection. | ||
26 | -- | ||
27 | {-# LANGUAGE CPP #-} | ||
28 | {-# LANGUAGE BangPatterns #-} | ||
29 | {-# LANGUAGE RecordWildCards #-} | ||
30 | module Data.Bitfield | ||
31 | ( PieceIx, PieceCount, Bitfield | ||
32 | |||
33 | -- * Construction | ||
34 | , haveAll, haveNone, have, singleton | ||
35 | , interval | ||
36 | , adjustSize | ||
37 | |||
38 | -- * Query | ||
39 | , Data.Bitfield.null | ||
40 | , haveCount, totalCount, completeness | ||
41 | |||
42 | , member, notMember | ||
43 | , findMin, findMax | ||
44 | |||
45 | , isSubsetOf | ||
46 | |||
47 | , Frequency, frequencies, rarest | ||
48 | |||
49 | -- * Combine | ||
50 | , union | ||
51 | , intersection | ||
52 | , difference | ||
53 | |||
54 | -- * Serialization | ||
55 | , fromBitmap, toBitmap | ||
56 | , toList | ||
57 | |||
58 | -- * Selection | ||
59 | , Selector | ||
60 | , selector, strategyClass | ||
61 | |||
62 | , strictFirst, strictLast | ||
63 | , rarestFirst, randomFirst, endGame | ||
64 | |||
65 | #if defined (TESTING) | ||
66 | -- * Debug | ||
67 | , mkBitfield | ||
68 | #endif | ||
69 | ) where | ||
70 | |||
71 | import Control.Monad | ||
72 | import Control.Monad.ST | ||
73 | import Data.ByteString (ByteString) | ||
74 | import qualified Data.ByteString as B | ||
75 | import qualified Data.ByteString.Lazy as Lazy | ||
76 | import Data.Vector.Unboxed (Vector) | ||
77 | import qualified Data.Vector.Unboxed as V | ||
78 | import qualified Data.Vector.Unboxed.Mutable as VM | ||
79 | import Data.IntervalSet (IntSet) | ||
80 | import qualified Data.IntervalSet as S | ||
81 | import qualified Data.IntervalSet.ByteString as S | ||
82 | import Data.List (foldl') | ||
83 | import Data.Monoid | ||
84 | import Data.Ratio | ||
85 | |||
86 | |||
87 | -- | Pieces indexed from zero up to 'PieceCount' value. | ||
88 | type PieceIx = Int | ||
89 | |||
90 | -- | Used to represent max set bound. Min set bound is always set to | ||
91 | -- zero. | ||
92 | type PieceCount = Int | ||
93 | |||
94 | -- TODO cache some operations | ||
95 | |||
96 | -- | Bitfields are represented just as integer sets, but with a | ||
97 | -- restriction: each set should be within the given interval (or a | ||
98 | -- subset of the specified interval). The size is used to specify the | ||
99 | -- interval, so a bitfield of size 10 may contain only indices in the | ||
100 | -- interval [0..9]. | ||
101 | -- | ||
102 | data Bitfield = Bitfield { | ||
103 | bfSize :: !PieceCount | ||
104 | , bfSet :: !IntSet | ||
105 | } deriving (Show, Read, Eq) | ||
106 | |||
107 | -- Invariants: all elements of bfSet lie in [0..bfSize - 1]; | ||
108 | |||
109 | instance Monoid Bitfield where | ||
110 | {-# SPECIALIZE instance Monoid Bitfield #-} | ||
111 | mempty = haveNone 0 | ||
112 | mappend = union | ||
113 | mconcat = unions | ||
114 | |||
115 | {----------------------------------------------------------------------- | ||
116 | Construction | ||
117 | -----------------------------------------------------------------------} | ||
118 | |||
119 | -- | The empty bitfield of the given size. | ||
120 | haveNone :: PieceCount -> Bitfield | ||
121 | haveNone s = Bitfield s S.empty | ||
122 | |||
123 | -- | The full bitfield containing all piece indices for the given size. | ||
124 | haveAll :: PieceCount -> Bitfield | ||
125 | haveAll s = Bitfield s (S.interval 0 (s - 1)) | ||
126 | |||
127 | -- | Insert the index into the set, ignoring out-of-range indices. | ||
128 | have :: PieceIx -> Bitfield -> Bitfield | ||
129 | have ix Bitfield {..} | ||
130 | | 0 <= ix && ix < bfSize = Bitfield bfSize (S.insert ix bfSet) | ||
131 | | otherwise = Bitfield bfSize bfSet | ||
132 | |||
133 | singleton :: PieceIx -> PieceCount -> Bitfield | ||
134 | singleton ix pc = have ix (haveNone pc) | ||
135 | |||
136 | -- | Assign a new size to the bitfield. FIXME Normally, the size should | ||
137 | -- only be decreased, otherwise an exception is raised. | ||
138 | adjustSize :: PieceCount -> Bitfield -> Bitfield | ||
139 | adjustSize s Bitfield {..} = Bitfield s bfSet | ||
140 | |||
141 | -- | NOTE: for internal use only | ||
142 | interval :: PieceCount -> PieceIx -> PieceIx -> Bitfield | ||
143 | interval pc a b = Bitfield pc (S.interval a b) | ||
144 | |||
145 | {----------------------------------------------------------------------- | ||
146 | Query | ||
147 | -----------------------------------------------------------------------} | ||
148 | |||
149 | -- | Test if the bitfield has no indices set: the peer does not have anything. | ||
150 | null :: Bitfield -> Bool | ||
151 | null Bitfield {..} = S.null bfSet | ||
152 | |||
153 | -- | Count of pieces the peer has. | ||
154 | haveCount :: Bitfield -> PieceCount | ||
155 | haveCount = S.size . bfSet | ||
156 | |||
157 | -- | Total count of pieces; piece indices range over [0..totalCount - 1]. | ||
158 | totalCount :: Bitfield -> PieceCount | ||
159 | totalCount = bfSize | ||
160 | |||
161 | -- | Ratio of /have/ piece count to the /total/ piece count. | ||
162 | -- | ||
163 | -- > forall bf. 0 <= completeness bf <= 1 | ||
164 | -- | ||
165 | completeness :: Bitfield -> Ratio PieceCount | ||
166 | completeness b = haveCount b % totalCount b | ||
167 | |||
168 | inRange :: PieceIx -> Bitfield -> Bool | ||
169 | inRange ix Bitfield {..} = 0 <= ix && ix < bfSize | ||
170 | |||
171 | member :: PieceIx -> Bitfield -> Bool | ||
172 | member ix bf @ Bitfield {..} | ||
173 | | ix `inRange` bf = ix `S.member` bfSet | ||
174 | | otherwise = False | ||
175 | |||
176 | notMember :: PieceIx -> Bitfield -> Bool | ||
177 | notMember ix bf @ Bitfield {..} | ||
178 | | ix `inRange` bf = ix `S.notMember` bfSet | ||
179 | | otherwise = True | ||
180 | |||
181 | -- | Find first available piece index. | ||
182 | findMin :: Bitfield -> PieceIx | ||
183 | findMin = S.findMin . bfSet | ||
184 | {-# INLINE findMin #-} | ||
185 | |||
186 | -- | Find last available piece index. | ||
187 | findMax :: Bitfield -> PieceIx | ||
188 | findMax = S.findMax . bfSet | ||
189 | {-# INLINE findMax #-} | ||
190 | |||
191 | isSubsetOf :: Bitfield -> Bitfield -> Bool | ||
192 | isSubsetOf a b = bfSet a `S.isSubsetOf` bfSet b | ||
193 | |||
194 | -- | Frequencies are needed in piece selection strategies which use | ||
195 | -- the availability count to find out the optimal next piece index to | ||
196 | -- download. | ||
197 | type Frequency = Int | ||
198 | |||
199 | -- | How many times each piece index occurs in the given list of bitfields. | ||
200 | frequencies :: [Bitfield] -> Vector Frequency | ||
201 | frequencies [] = V.fromList [] | ||
202 | frequencies xs = runST $ do | ||
203 | v <- VM.new size | ||
204 | VM.set v 0 | ||
205 | forM_ xs $ \ Bitfield {..} -> do | ||
206 | forM_ (S.toList bfSet) $ \ x -> do | ||
207 | fr <- VM.read v x | ||
208 | VM.write v x (succ fr) | ||
209 | V.unsafeFreeze v | ||
210 | where | ||
211 | size = maximum (map bfSize xs) | ||
212 | |||
213 | -- TODO it seems like this operation is veeery slow | ||
214 | |||
215 | -- | Find the least available piece index. If no piece is available, return | ||
216 | -- 'Nothing'. | ||
217 | rarest :: [Bitfield] -> Maybe PieceIx | ||
218 | rarest xs | ||
219 | | V.null freqMap = Nothing | ||
220 | | otherwise | ||
221 | = Just $ fst $ V.ifoldr' minIx (0, freqMap V.! 0) freqMap | ||
222 | where | ||
223 | freqMap = frequencies xs | ||
224 | |||
225 | minIx :: PieceIx -> Frequency | ||
226 | -> (PieceIx, Frequency) | ||
227 | -> (PieceIx, Frequency) | ||
228 | minIx ix fr acc@(_, fra) | ||
229 | | fr < fra && fr > 0 = (ix, fr) | ||
230 | | otherwise = acc | ||
231 | |||
232 | |||
233 | {----------------------------------------------------------------------- | ||
234 | Combine | ||
235 | -----------------------------------------------------------------------} | ||
236 | |||
237 | -- | Find indices that at least one peer has. | ||
238 | union :: Bitfield -> Bitfield -> Bitfield | ||
239 | union a b = {-# SCC union #-} Bitfield { | ||
240 | bfSize = bfSize a `max` bfSize b | ||
241 | , bfSet = bfSet a `S.union` bfSet b | ||
242 | } | ||
243 | |||
244 | -- | Find indices both peers have. | ||
245 | intersection :: Bitfield -> Bitfield -> Bitfield | ||
246 | intersection a b = {-# SCC intersection #-} Bitfield { | ||
247 | bfSize = bfSize a `min` bfSize b | ||
248 | , bfSet = bfSet a `S.intersection` bfSet b | ||
249 | } | ||
250 | |||
251 | -- | Find indices which the first peer has but the second peer does not. | ||
252 | difference :: Bitfield -> Bitfield -> Bitfield | ||
253 | difference a b = {-# SCC difference #-} Bitfield { | ||
254 | bfSize = bfSize a -- FIXME is it reasonable? | ||
255 | , bfSet = bfSet a `S.difference` bfSet b | ||
256 | } | ||
257 | |||
258 | -- | Find indices that any of the peers have. | ||
259 | unions :: [Bitfield] -> Bitfield | ||
260 | unions = {-# SCC unions #-} foldl' union (haveNone 0) | ||
261 | |||
262 | {----------------------------------------------------------------------- | ||
263 | Serialization | ||
264 | -----------------------------------------------------------------------} | ||
265 | |||
266 | -- | List all /have/ indices. | ||
267 | toList :: Bitfield -> [PieceIx] | ||
268 | toList Bitfield {..} = S.toList bfSet | ||
269 | |||
270 | -- | Unpack a 'Bitfield' from a tightly packed bit array. Note the resulting | ||
271 | -- size might be larger than the real bitfield size; use 'adjustSize'. | ||
272 | fromBitmap :: ByteString -> Bitfield | ||
273 | fromBitmap bs = {-# SCC fromBitmap #-} Bitfield { | ||
274 | bfSize = B.length bs * 8 | ||
275 | , bfSet = S.fromByteString bs | ||
276 | } | ||
277 | {-# INLINE fromBitmap #-} | ||
278 | |||
279 | -- | Pack a 'Bitfield' to tightly packed bit array. | ||
280 | toBitmap :: Bitfield -> Lazy.ByteString | ||
281 | toBitmap Bitfield {..} = {-# SCC toBitmap #-} Lazy.fromChunks [intsetBM, alignment] | ||
282 | where | ||
283 | byteSize = bfSize `div` 8 + if bfSize `mod` 8 == 0 then 0 else 1 | ||
284 | alignment = B.replicate (byteSize - B.length intsetBM) 0 | ||
285 | intsetBM = S.toByteString bfSet | ||
286 | |||
287 | {----------------------------------------------------------------------- | ||
288 | Debug | ||
289 | -----------------------------------------------------------------------} | ||
290 | |||
291 | -- | For internal use only. | ||
292 | mkBitfield :: PieceCount -> [PieceIx] -> Bitfield | ||
293 | mkBitfield s ixs = Bitfield { | ||
294 | bfSize = s | ||
295 | , bfSet = S.splitGT (-1) $ S.splitLT s $ S.fromList ixs | ||
296 | } | ||
297 | |||
298 | {----------------------------------------------------------------------- | ||
299 | Selection | ||
300 | -----------------------------------------------------------------------} | ||
301 | |||
302 | type Selector = Bitfield -- ^ Indices of client /have/ pieces. | ||
303 | -> Bitfield -- ^ Indices of peer /have/ pieces. | ||
304 | -> [Bitfield] -- ^ Indices of other peers /have/ pieces. | ||
305 | -> Maybe PieceIx -- ^ Zero-based index of piece to request | ||
306 | -- to, if any. | ||
307 | |||
308 | selector :: Selector -- ^ Selector to use at the start. | ||
309 | -> Ratio PieceCount | ||
310 | -> Selector -- ^ Selector to use after the client have | ||
311 | -- the C pieces. | ||
312 | -> Selector -- ^ Selector that changes behaviour based | ||
313 | -- on completeness. | ||
314 | selector start pt ready h a xs = | ||
315 | case strategyClass pt h of | ||
316 | SCBeginning -> start h a xs | ||
317 | SCReady -> ready h a xs | ||
318 | SCEnd -> endGame h a xs | ||
319 | |||
320 | data StartegyClass | ||
321 | = SCBeginning | ||
322 | | SCReady | ||
323 | | SCEnd | ||
324 | deriving (Show, Eq, Ord, Enum, Bounded) | ||
325 | |||
326 | |||
327 | strategyClass :: Ratio PieceCount -> Bitfield -> StartegyClass | ||
328 | strategyClass threshold = classify . completeness | ||
329 | where | ||
330 | classify c | ||
331 | | c < threshold = SCBeginning | ||
332 | | c + 1 % numerator c < 1 = SCReady | ||
333 | -- FIXME numerator have is not total count | ||
334 | | otherwise = SCEnd | ||
335 | |||
336 | |||
337 | -- | Select the first available piece. | ||
338 | strictFirst :: Selector | ||
339 | strictFirst h a _ = Just $ findMin (difference a h) | ||
340 | |||
341 | -- | Select the last available piece. | ||
342 | strictLast :: Selector | ||
343 | strictLast h a _ = Just $ findMax (difference a h) | ||
344 | |||
345 | -- | | ||
346 | rarestFirst :: Selector | ||
347 | rarestFirst h a xs = rarest (map (intersection want) xs) | ||
348 | where | ||
349 | want = difference h a | ||
350 | |||
351 | -- | On average, random first is faster than the rarest first strategy, | ||
352 | -- but only if all pieces are available. | ||
353 | randomFirst :: Selector | ||
354 | randomFirst = do | ||
355 | -- randomIO | ||
356 | error "randomFirst" | ||
357 | |||
358 | endGame :: Selector | ||
359 | endGame = strictLast | ||
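For reviewers skimming this removal: a minimal sketch of how the deleted Data.Bitfield selection API fit together (the module reappears as Data.Torrent.Bitfield in the import changes below). The piece count and peer sets are invented for illustration, and the sketch assumes the old module is still on the import path.

module Main (main) where

import Data.Bitfield

-- Build a peer's bitfield from a list of piece indices it claims to have.
mkPeer :: PieceCount -> [PieceIx] -> Bitfield
mkPeer n = foldr have (haveNone n)

main :: IO ()
main = do
  let total  = 8 :: PieceCount
      client = have 0 (haveNone total)   -- the client already has piece 0
      peerA  = mkPeer total [0, 1, 2]
      peerB  = mkPeer total [2, 3]
  -- Ask the rarest-first selector which piece to request next, if any.
  print (rarestFirst client (peerA `union` peerB) [peerA, peerB])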
diff --git a/src/Data/Torrent.hs b/src/Data/Torrent.hs
deleted file mode 100644
index 36974ccf..00000000
--- a/src/Data/Torrent.hs
+++ /dev/null
@@ -1,511 +0,0 @@
1 | -- | | ||
2 | -- Copyright : (c) Sam T. 2013 | ||
3 | -- License : MIT | ||
4 | -- Maintainer : pxqr.sta@gmail.com | ||
5 | -- Stability : experimental | ||
6 | -- Portability : portable | ||
7 | -- | ||
8 | -- A torrent file contains metadata about files and folders but not | ||
9 | -- the content itself. Torrent files are bencoded dictionaries. There is | ||
10 | -- also other info which is used to help peers join the swarm. | ||
11 | -- | ||
12 | -- This module provides torrent metainfo serialization and info hash | ||
13 | -- extraction. | ||
14 | -- | ||
15 | -- For more info see: | ||
16 | -- <http://www.bittorrent.org/beps/bep_0003.html#metainfo-files>, | ||
17 | -- <https://wiki.theory.org/BitTorrentSpecification#Metainfo_File_Structure> | ||
18 | -- | ||
19 | {-# LANGUAGE CPP #-} | ||
20 | {-# LANGUAGE FlexibleInstances #-} | ||
21 | {-# LANGUAGE BangPatterns #-} | ||
22 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} | ||
23 | {-# LANGUAGE TemplateHaskell #-} | ||
24 | {-# OPTIONS -fno-warn-orphans #-} | ||
25 | -- TODO refine interface | ||
26 | module Data.Torrent | ||
27 | ( -- * Torrent | ||
28 | Torrent(..), ContentInfo(..), FileInfo(..) | ||
29 | , mktorrent, simpleTorrent | ||
30 | , torrentExt, isTorrentPath | ||
31 | , fromFile | ||
32 | |||
33 | -- * Files layout | ||
34 | , Layout, contentLayout | ||
35 | , contentLength, fileCount, fileOffset | ||
36 | , pieceCount, blockCount | ||
37 | , isSingleFile, isMultiFile | ||
38 | |||
39 | , checkPiece | ||
40 | |||
41 | -- * Info hash | ||
42 | #if defined (TESTING) | ||
43 | , InfoHash(..) | ||
44 | #else | ||
45 | , InfoHash(..) | ||
46 | #endif | ||
47 | , ppInfoHash | ||
48 | , addHashToURI | ||
49 | |||
50 | -- * Extra | ||
51 | , sizeInBase | ||
52 | |||
53 | -- #if defined (TESTING) | ||
54 | -- * Internal | ||
55 | , Data.Torrent.hash | ||
56 | , Data.Torrent.hashlazy | ||
57 | , layoutOffsets | ||
58 | , slice | ||
59 | -- #endif | ||
60 | ) where | ||
61 | |||
62 | import Prelude hiding (sum) | ||
63 | |||
64 | import Control.Applicative | ||
65 | import Control.Arrow | ||
66 | import Control.Exception | ||
67 | import Control.Monad | ||
68 | |||
69 | import qualified Crypto.Hash.SHA1 as C | ||
70 | |||
71 | import Data.Aeson | ||
72 | import Data.Aeson.TH | ||
73 | import Data.BEncode as BE | ||
74 | import Data.Char | ||
75 | import Data.Foldable | ||
76 | import qualified Data.ByteString as B | ||
77 | import Data.ByteString.Internal | ||
78 | import qualified Data.ByteString.Char8 as BC (pack, unpack) | ||
79 | import qualified Data.ByteString.Lazy as Lazy | ||
80 | import qualified Data.ByteString.Lazy.Builder as B | ||
81 | import qualified Data.ByteString.Lazy.Builder.ASCII as B | ||
82 | import qualified Data.List as L | ||
83 | import Data.Map (Map) | ||
84 | import qualified Data.Map as M | ||
85 | import Data.Hashable as Hashable | ||
86 | import Data.Text (Text) | ||
87 | import Data.Serialize as S hiding (Result) | ||
88 | import Text.PrettyPrint | ||
89 | import Text.ParserCombinators.ReadP as P | ||
90 | |||
91 | import Network.URI | ||
92 | import System.FilePath | ||
93 | import Numeric | ||
94 | |||
95 | {----------------------------------------------------------------------- | ||
96 | Info hash | ||
97 | -----------------------------------------------------------------------} | ||
98 | |||
99 | -- | The exactly 20-byte-long SHA1 hash of the info part of a torrent file. | ||
100 | newtype InfoHash = InfoHash { getInfoHash :: ByteString } | ||
101 | deriving (Eq, Ord, ToJSON, FromJSON) | ||
102 | |||
103 | instance Show InfoHash where | ||
104 | show = render . ppInfoHash | ||
105 | |||
106 | instance Read InfoHash where | ||
107 | readsPrec _ = readP_to_S $ do | ||
108 | str <- replicateM 40 (satisfy isHexDigit) | ||
109 | return $ InfoHash $ decodeIH str | ||
110 | where | ||
111 | decodeIH = B.pack . map fromHex . pair | ||
112 | fromHex (a, b) = read $ '0' : 'x' : a : b : [] | ||
113 | |||
114 | pair (a : b : xs) = (a, b) : pair xs | ||
115 | pair _ = [] | ||
116 | |||
117 | instance Hashable InfoHash where | ||
118 | hash = Hashable.hash . getInfoHash | ||
119 | |||
120 | instance BEncodable InfoHash where | ||
121 | toBEncode = toBEncode . getInfoHash | ||
122 | fromBEncode be = InfoHash <$> fromBEncode be | ||
123 | |||
124 | instance Serialize InfoHash where | ||
125 | put = putByteString . getInfoHash | ||
126 | get = InfoHash <$> getBytes 20 | ||
127 | |||
128 | instance BEncodable a => BEncodable (Map InfoHash a) where | ||
129 | {-# SPECIALIZE instance BEncodable a => BEncodable (Map InfoHash a) #-} | ||
130 | fromBEncode b = M.mapKeys InfoHash <$> fromBEncode b | ||
131 | {-# INLINE fromBEncode #-} | ||
132 | |||
133 | toBEncode = toBEncode . M.mapKeys getInfoHash | ||
134 | {-# INLINE toBEncode #-} | ||
135 | |||
136 | -- | Hash strict bytestring using SHA1 algorithm. | ||
137 | hash :: ByteString -> InfoHash | ||
138 | hash = InfoHash . C.hash | ||
139 | |||
140 | -- | Hash lazy bytestring using SHA1 algorithm. | ||
141 | hashlazy :: Lazy.ByteString -> InfoHash | ||
142 | hashlazy = InfoHash . C.hashlazy | ||
143 | |||
144 | -- | Pretty print info hash in hexadecimal format. | ||
145 | ppInfoHash :: InfoHash -> Doc | ||
146 | ppInfoHash = text . BC.unpack . ppHex . getInfoHash | ||
147 | |||
148 | ppHex :: ByteString -> ByteString | ||
149 | ppHex = Lazy.toStrict . B.toLazyByteString . B.byteStringHexFixed | ||
150 | |||
151 | -- | Add query info hash parameter to uri. | ||
152 | -- | ||
153 | -- > info_hash=<url_encoded_info_hash> | ||
154 | -- | ||
155 | addHashToURI :: URI -> InfoHash -> URI | ||
156 | addHashToURI uri s = uri { | ||
157 | uriQuery = uriQuery uri ++ mkPref (uriQuery uri) ++ | ||
158 | "info_hash=" ++ rfc1738Encode (BC.unpack (getInfoHash s)) | ||
159 | } | ||
160 | where | ||
161 | mkPref [] = "?" | ||
162 | mkPref ('?' : _) = "&" | ||
163 | mkPref _ = error "addHashToURI" | ||
164 | |||
165 | rfc1738Encode = L.concatMap (\c -> if unreservedS c then [c] else encodeHex c) | ||
166 | where | ||
167 | unreservedS = (`L.elem` chars) | ||
168 | chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./" | ||
169 | encodeHex c = '%' : pHex c | ||
170 | pHex c = let p = (showHex . ord $ c) "" | ||
171 | in if L.length p == 1 then '0' : p else p | ||
172 | |||
173 | {----------------------------------------------------------------------- | ||
174 | Torrent metainfo | ||
175 | -----------------------------------------------------------------------} | ||
176 | |||
177 | type Time = Text | ||
178 | |||
179 | -- | Contains info about a single file. | ||
180 | data FileInfo = FileInfo { | ||
181 | fiLength :: !Integer | ||
182 | -- ^ Length of the file in bytes. | ||
183 | |||
184 | , fiMD5sum :: Maybe ByteString | ||
185 | -- ^ 32 character long MD5 sum of the file. | ||
186 | -- Used by third-party tools, not by bittorrent protocol itself. | ||
187 | |||
188 | , fiPath :: ![ByteString] | ||
189 | -- ^ One or more string elements that together represent the | ||
190 | -- path and filename. Each element in the list corresponds to | ||
191 | -- either a directory name or (in the case of the last | ||
192 | -- element) the filename. For example, the file: | ||
193 | -- | ||
194 | -- > "dir1/dir2/file.ext" | ||
195 | -- | ||
196 | -- would consist of three string elements: | ||
197 | -- | ||
198 | -- > ["dir1", "dir2", "file.ext"] | ||
199 | -- | ||
200 | } deriving (Show, Read, Eq) | ||
201 | |||
202 | $(deriveJSON (L.map toLower . L.dropWhile isLower) ''FileInfo) | ||
203 | |||
204 | |||
205 | -- | The info part of the .torrent file contains info about each content file. | ||
206 | data ContentInfo = | ||
207 | SingleFile { | ||
208 | ciLength :: !Integer | ||
209 | -- ^ Length of the file in bytes. | ||
210 | |||
211 | , ciMD5sum :: Maybe ByteString | ||
212 | -- ^ 32 character long MD5 sum of the file. | ||
213 | -- Used by third-party tools, not by bittorrent protocol itself. | ||
214 | |||
215 | , ciName :: !ByteString | ||
216 | -- ^ Suggested name of the single file. | ||
217 | |||
218 | |||
219 | |||
220 | , ciPieceLength :: !Int | ||
221 | -- ^ Number of bytes in each piece. | ||
222 | |||
223 | , ciPieces :: !ByteString | ||
224 | -- ^ Concatenation of all 20-byte SHA1 hash values. | ||
225 | |||
226 | , ciPrivate :: Maybe Bool | ||
227 | -- ^ If set the client MUST publish its presence to get other | ||
228 | -- peers ONLY via the trackers explicitly described in the | ||
229 | -- metainfo file. | ||
230 | -- | ||
231 | -- BEP 27: <http://www.bittorrent.org/beps/bep_0027.html> | ||
232 | } | ||
233 | |||
234 | | MultiFile { | ||
235 | ciFiles :: ![FileInfo] | ||
236 | -- ^ List of all the files that the torrent contains. | ||
237 | |||
238 | , ciName :: !ByteString | ||
239 | -- ^ The file path of the directory in which to store all the files. | ||
240 | |||
241 | , ciPieceLength :: !Int | ||
242 | , ciPieces :: !ByteString | ||
243 | , ciPrivate :: Maybe Bool | ||
244 | } deriving (Show, Read, Eq) | ||
245 | |||
246 | $(deriveJSON id ''ContentInfo) | ||
247 | |||
248 | -- TODO more convenient form of torrent info. | ||
249 | -- | Metainfo about particular torrent. | ||
250 | data Torrent = Torrent { | ||
251 | tInfoHash :: !InfoHash | ||
252 | -- ^ SHA1 hash of the 'ContentInfo' of the 'Torrent'. | ||
253 | |||
254 | , tAnnounce :: !URI | ||
255 | -- ^ The URL of the tracker. | ||
256 | |||
257 | -- NOTE: out of lexicographic order! | ||
258 | , tInfo :: !ContentInfo | ||
259 | -- ^ Info about each content file. | ||
260 | |||
261 | , tAnnounceList :: Maybe [[URI]] | ||
262 | -- ^ The announce list adds multiple-tracker support. | ||
263 | -- | ||
264 | -- BEP 12: <http://www.bittorrent.org/beps/bep_0012.html> | ||
265 | |||
266 | , tComment :: Maybe Text | ||
267 | -- ^ Free-form comments of the author. | ||
268 | |||
269 | , tCreatedBy :: Maybe ByteString | ||
270 | -- ^ Name and version of the program used to create the .torrent. | ||
271 | |||
272 | , tCreationDate :: Maybe Time | ||
273 | -- ^ Creation time of the torrent, in standard UNIX epoch. | ||
274 | |||
275 | , tEncoding :: Maybe ByteString | ||
276 | -- ^ String encoding format used to generate the pieces part of | ||
277 | -- the info dictionary in the .torrent metafile. | ||
278 | |||
279 | , tPublisher :: Maybe URI | ||
280 | -- ^ Contains the RSA public key of the publisher of the | ||
281 | -- torrent. The private counterpart of this key has the | ||
282 | -- authority to allow new peers onto the swarm. | ||
283 | |||
284 | , tPublisherURL :: Maybe URI | ||
285 | , tSignature :: Maybe ByteString | ||
286 | -- ^ The RSA signature of the info dictionary (specifically, | ||
287 | -- the encrypted SHA-1 hash of the info dictionary). | ||
288 | } deriving (Show, Eq) | ||
289 | |||
290 | instance Hashable Torrent where | ||
291 | hash = Hashable.hash . tInfoHash | ||
292 | |||
293 | {- note that the info hash is actually a redundant field | ||
294 | but it's better to keep it here to avoid heavy recomputation | ||
295 | -} | ||
296 | |||
297 | -- | Smart constructor for 'Torrent' which computes the info hash. | ||
298 | mktorrent :: URI -> ContentInfo | ||
299 | -> Maybe [[URI]] -> Maybe Text -> Maybe ByteString | ||
300 | -> Maybe Time -> Maybe ByteString -> Maybe URI | ||
301 | -> Maybe URI -> Maybe ByteString | ||
302 | -> Torrent | ||
303 | mktorrent announce info = Torrent (hashlazy (BE.encoded info)) announce info | ||
304 | |||
305 | -- | A simple torrent contains only required fields. | ||
306 | simpleTorrent :: URI -> ContentInfo -> Torrent | ||
307 | simpleTorrent announce info = mktorrent announce info | ||
308 | Nothing Nothing Nothing | ||
309 | Nothing Nothing Nothing | ||
310 | Nothing Nothing | ||
311 | |||
312 | -- TODO check if pieceLength is power of 2 | ||
313 | |||
314 | instance BEncodable URI where | ||
315 | toBEncode uri = toBEncode (BC.pack (uriToString id uri "")) | ||
316 | {-# INLINE toBEncode #-} | ||
317 | |||
318 | fromBEncode (BString s) | Just url <- parseURI (BC.unpack s) = return url | ||
319 | fromBEncode b = decodingError $ "url <" ++ show b ++ ">" | ||
320 | {-# INLINE fromBEncode #-} | ||
321 | |||
322 | instance BEncodable Torrent where | ||
323 | toBEncode Torrent {..} = fromAscAssocs | ||
324 | [ "announce" --> tAnnounce | ||
325 | , "announce-list" -->? tAnnounceList | ||
326 | , "comment" -->? tComment | ||
327 | , "created by" -->? tCreatedBy | ||
328 | , "creation date" -->? tCreationDate | ||
329 | , "encoding" -->? tEncoding | ||
330 | , "info" --> tInfo | ||
331 | , "publisher" -->? tPublisher | ||
332 | , "publisher-url" -->? tPublisherURL | ||
333 | , "signature" -->? tSignature | ||
334 | ] | ||
335 | |||
336 | fromBEncode (BDict d) | Just info <- M.lookup "info" d = | ||
337 | Torrent <$> pure (hashlazy (BE.encode info)) -- WARN | ||
338 | <*> d >-- "announce" | ||
339 | <*> d >-- "info" | ||
340 | <*> d >--? "announce-list" | ||
341 | <*> d >--? "comment" | ||
342 | <*> d >--? "created by" | ||
343 | <*> d >--? "creation date" | ||
344 | <*> d >--? "encoding" | ||
345 | <*> d >--? "publisher" | ||
346 | <*> d >--? "publisher-url" | ||
347 | <*> d >--? "signature" | ||
348 | |||
349 | fromBEncode _ = decodingError "Torrent" | ||
350 | |||
351 | |||
352 | instance BEncodable ContentInfo where | ||
353 | toBEncode SingleFile {..} = fromAscAssocs | ||
354 | [ "length" --> ciLength | ||
355 | , "md5sum" -->? ciMD5sum | ||
356 | , "name" --> ciName | ||
357 | |||
358 | , "piece length" --> ciPieceLength | ||
359 | , "pieces" --> ciPieces | ||
360 | , "private" -->? ciPrivate | ||
361 | ] | ||
362 | |||
363 | toBEncode MultiFile {..} = fromAscAssocs | ||
364 | [ "files" --> ciFiles | ||
365 | , "name" --> ciName | ||
366 | |||
367 | , "piece length" --> ciPieceLength | ||
368 | , "pieces" --> ciPieces | ||
369 | , "private" -->? ciPrivate | ||
370 | ] | ||
371 | |||
372 | fromBEncode (BDict d) | ||
373 | | Just (BList fs) <- M.lookup "files" d = | ||
374 | MultiFile <$> mapM fromBEncode fs | ||
375 | <*> d >-- "name" | ||
376 | <*> d >-- "piece length" | ||
377 | <*> d >-- "pieces" | ||
378 | <*> d >--? "private" | ||
379 | | otherwise = | ||
380 | SingleFile <$> d >-- "length" | ||
381 | <*> d >--? "md5sum" | ||
382 | <*> d >-- "name" | ||
383 | <*> d >-- "piece length" | ||
384 | <*> d >-- "pieces" | ||
385 | <*> d >--? "private" | ||
386 | fromBEncode _ = decodingError "ContentInfo" | ||
387 | |||
388 | |||
389 | instance BEncodable FileInfo where | ||
390 | toBEncode FileInfo {..} = fromAssocs | ||
391 | [ "length" --> fiLength | ||
392 | , "md5sum" -->? fiMD5sum | ||
393 | , "path" --> fiPath | ||
394 | ] | ||
395 | |||
396 | fromBEncode (BDict d) = | ||
397 | FileInfo <$> d >-- "length" | ||
398 | <*> d >--? "md5sum" | ||
399 | <*> d >-- "path" | ||
400 | |||
401 | fromBEncode _ = decodingError "FileInfo" | ||
402 | |||
403 | |||
404 | -- | Divide and round up. | ||
405 | sizeInBase :: Integral a => a -> Int -> Int | ||
406 | sizeInBase n b = fromIntegral (n `div` fromIntegral b) + align | ||
407 | where | ||
408 | align = if n `mod` fromIntegral b == 0 then 0 else 1 | ||
409 | {-# SPECIALIZE sizeInBase :: Int -> Int -> Int #-} | ||
410 | {-# SPECIALIZE sizeInBase :: Integer -> Int -> Int #-} | ||
411 | |||
412 | |||
413 | -- | Find the sum of the sizes of all torrent files. | ||
414 | contentLength :: ContentInfo -> Integer | ||
415 | contentLength SingleFile { ciLength = len } = len | ||
416 | contentLength MultiFile { ciFiles = tfs } = sum (map fiLength tfs) | ||
417 | |||
418 | -- | Get count of all files in torrent. | ||
419 | fileCount :: ContentInfo -> Int | ||
420 | fileCount SingleFile {..} = 1 | ||
421 | fileCount MultiFile {..} = length ciFiles | ||
422 | |||
423 | -- | Find count of pieces in the torrent. If torrent size is not a | ||
424 | -- multiple of piece size then the count is rounded up. | ||
425 | pieceCount :: ContentInfo -> Int | ||
426 | pieceCount ci = contentLength ci `sizeInBase` ciPieceLength ci | ||
427 | |||
428 | -- | Find number of blocks of the specified size. If torrent size is | ||
429 | -- not a multiple of block size then the count is rounded up. | ||
430 | blockCount :: Int -- ^ Block size. | ||
431 | -> ContentInfo -- ^ Torrent content info. | ||
432 | -> Int -- ^ Number of blocks. | ||
433 | blockCount blkSize ci = contentLength ci `sizeInBase` blkSize | ||
434 | |||
435 | -- | File layout specifies the order and the size of each file in the | ||
436 | -- storage. Note that order of files is highly important since we | ||
437 | -- coalesce all the files in the given order to get the linear block | ||
438 | -- address space. | ||
439 | -- | ||
440 | type Layout = [(FilePath, Int)] | ||
441 | |||
442 | -- | Extract files layout from torrent info with the given root path. | ||
443 | contentLayout :: FilePath -- ^ Root path for all the torrent files. | ||
444 | -> ContentInfo -- ^ Torrent content information. | ||
445 | -> Layout -- ^ All file paths prefixed with the | ||
446 | -- given root. | ||
447 | contentLayout rootPath = filesLayout | ||
448 | where | ||
449 | filesLayout (SingleFile { ciName = name, ciLength = len }) | ||
450 | = [(rootPath </> BC.unpack name, fromIntegral len)] | ||
451 | filesLayout (MultiFile { ciFiles = fs, ciName = dir }) = | ||
452 | map (first mkPath . fl) fs | ||
453 | where -- TODO use utf8 encoding in name | ||
454 | mkPath = ((rootPath </> BC.unpack dir) </>) . joinPath . map BC.unpack | ||
455 | |||
456 | fl (FileInfo { fiPath = p, fiLength = len }) = (p, fromIntegral len) | ||
457 | |||
458 | layoutOffsets :: Layout -> Layout | ||
459 | layoutOffsets = go 0 | ||
460 | where | ||
461 | go !_ [] = [] | ||
462 | go !offset ((n, s) : xs) = (n, offset) : go (offset + s) xs | ||
463 | |||
464 | -- | Gives global offset of a content file for a given full path. | ||
465 | fileOffset :: FilePath -> ContentInfo -> Maybe Integer | ||
466 | fileOffset fullPath | ||
467 | = fmap fromIntegral . lookup fullPath . layoutOffsets . contentLayout "" | ||
468 | |||
469 | -- | Test if this is single file torrent. | ||
470 | isSingleFile :: ContentInfo -> Bool | ||
471 | isSingleFile SingleFile {} = True | ||
472 | isSingleFile _ = False | ||
473 | {-# INLINE isSingleFile #-} | ||
474 | |||
475 | -- | Test if this is multifile torrent. | ||
476 | isMultiFile :: ContentInfo -> Bool | ||
477 | isMultiFile MultiFile {} = True | ||
478 | isMultiFile _ = False | ||
479 | {-# INLINE isMultiFile #-} | ||
480 | |||
481 | slice :: Int -> Int -> ByteString -> ByteString | ||
482 | slice from siz = B.take siz . B.drop from | ||
483 | {-# INLINE slice #-} | ||
484 | |||
485 | -- | Extract the validation hash for the specified piece index. | ||
486 | pieceHash :: ContentInfo -> Int -> ByteString | ||
487 | pieceHash ci ix = slice (hashsize * ix) hashsize (ciPieces ci) | ||
488 | where | ||
489 | hashsize = 20 | ||
490 | |||
491 | -- | Validate piece with metainfo hash. | ||
492 | checkPiece :: ContentInfo -> Int -> ByteString -> Bool | ||
493 | checkPiece ci ix piece | ||
494 | = B.length piece == ciPieceLength ci | ||
495 | && C.hash piece == pieceHash ci ix | ||
496 | |||
497 | -- | Extension usually used for torrent metafiles. | ||
498 | torrentExt :: String | ||
499 | torrentExt = "torrent" | ||
500 | |||
501 | -- | Test if this path has proper extension. | ||
502 | isTorrentPath :: FilePath -> Bool | ||
503 | isTorrentPath filepath = takeExtension filepath == extSeparator : torrentExt | ||
504 | |||
505 | -- | Read and decode a .torrent file. | ||
506 | fromFile :: FilePath -> IO Torrent | ||
507 | fromFile filepath = do | ||
508 | contents <- B.readFile filepath | ||
509 | case decoded contents of | ||
510 | Right !t -> return t | ||
511 | Left msg -> throwIO $ userError $ msg ++ " while reading torrent" \ No newline at end of file | ||
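Since the metainfo API only moves (Data.Torrent becomes Data.Torrent.Metainfo in the hunks below), here is a small sketch of the calls client code keeps using; the file name "example.torrent" is an assumption for illustration and is not part of this change.

module Main (main) where

import Data.Torrent
import Text.PrettyPrint (render)

main :: IO ()
main = do
  -- fromFile throws a userError on malformed metainfo (see above).
  t <- fromFile "example.torrent"
  let ci = tInfo t
  putStrLn $ "info hash:  " ++ render (ppInfoHash (tInfoHash t))
  putStrLn $ "files:      " ++ show (fileCount ci)
  putStrLn $ "bytes:      " ++ show (contentLength ci)
  putStrLn $ "pieces:     " ++ show (pieceCount ci)
  putStrLn $ "16K blocks: " ++ show (blockCount (16 * 1024) ci)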
diff --git a/src/Network/BitTorrent.hs b/src/Network/BitTorrent.hs
index e68d1597..7ff85b39 100644
--- a/src/Network/BitTorrent.hs
+++ b/src/Network/BitTorrent.hs
@@ -7,7 +7,7 @@
7 | -- | 7 | -- |
8 | {-# LANGUAGE RecordWildCards #-} | 8 | {-# LANGUAGE RecordWildCards #-} |
9 | module Network.BitTorrent | 9 | module Network.BitTorrent |
10 | ( module Data.Torrent | 10 | ( module Data.Torrent.Metainfo |
11 | 11 | ||
12 | , TorrentLoc(..), TorrentMap, Progress(..) | 12 | , TorrentLoc(..), TorrentMap, Progress(..) |
13 | , ThreadCount, SessionCount | 13 | , ThreadCount, SessionCount |
@@ -49,7 +49,7 @@ import Text.PrettyPrint
49 | import System.Directory | 49 | import System.Directory |
50 | import System.FilePath | 50 | import System.FilePath |
51 | 51 | ||
52 | import Data.Torrent | 52 | import Data.Torrent.Metainfo |
53 | import Network.BitTorrent.Sessions.Types | 53 | import Network.BitTorrent.Sessions.Types |
54 | import Network.BitTorrent.Sessions | 54 | import Network.BitTorrent.Sessions |
55 | import Network.BitTorrent.Extension | 55 | import Network.BitTorrent.Extension |
diff --git a/src/Network/BitTorrent/DHT/Protocol.hs b/src/Network/BitTorrent/DHT/Protocol.hs
index b0100b70..73e5fa68 100644
--- a/src/Network/BitTorrent/DHT/Protocol.hs
+++ b/src/Network/BitTorrent/DHT/Protocol.hs
@@ -32,7 +32,7 @@ import System.Entropy
32 | import Remote.KRPC | 32 | import Remote.KRPC |
33 | import Remote.KRPC.Protocol | 33 | import Remote.KRPC.Protocol |
34 | import Data.BEncode | 34 | import Data.BEncode |
35 | import Data.Torrent | 35 | import Data.Torrent.Metainfo |
36 | import Network.BitTorrent.Peer | 36 | import Network.BitTorrent.Peer |
37 | import Network.BitTorrent.Exchange.Protocol () | 37 | import Network.BitTorrent.Exchange.Protocol () |
38 | 38 | ||
diff --git a/src/Network/BitTorrent/Exchange.hs b/src/Network/BitTorrent/Exchange.hs
index 0f1d2833..e81880b4 100644
--- a/src/Network/BitTorrent/Exchange.hs
+++ b/src/Network/BitTorrent/Exchange.hs
@@ -91,7 +91,8 @@ import Text.PrettyPrint as PP hiding (($$))
91 | 91 | ||
92 | import Network | 92 | import Network |
93 | 93 | ||
94 | import Data.Bitfield as BF | 94 | import Data.Torrent.Block |
95 | import Data.Torrent.Bitfield as BF | ||
95 | import Network.BitTorrent.Extension | 96 | import Network.BitTorrent.Extension |
96 | import Network.BitTorrent.Exchange.Protocol | 97 | import Network.BitTorrent.Exchange.Protocol |
97 | import Network.BitTorrent.Sessions.Types | 98 | import Network.BitTorrent.Sessions.Types |
diff --git a/src/Network/BitTorrent/Exchange/Protocol.hs b/src/Network/BitTorrent/Exchange/Protocol.hs
index 00b6795b..3b2472da 100644
--- a/src/Network/BitTorrent/Exchange/Protocol.hs
+++ b/src/Network/BitTorrent/Exchange/Protocol.hs
@@ -37,16 +37,6 @@ module Network.BitTorrent.Exchange.Protocol
37 | , defaultHandshake, defaultBTProtocol, defaultReserved | 37 | , defaultHandshake, defaultBTProtocol, defaultReserved |
38 | , handshakeMaxSize | 38 | , handshakeMaxSize |
39 | 39 | ||
40 | -- * Block | ||
41 | , PieceIx, BlockLIx, PieceLIx | ||
42 | , BlockIx(..), ppBlockIx | ||
43 | , Block(..), ppBlock ,blockSize | ||
44 | , pieceIx, blockIx | ||
45 | , blockRange, ixRange, isPiece | ||
46 | |||
47 | -- ** Defaults | ||
48 | , defaultBlockSize | ||
49 | |||
50 | -- * Regular messages | 40 | -- * Regular messages |
51 | , Message(..) | 41 | , Message(..) |
52 | , ppMessage | 42 | , ppMessage |
@@ -89,12 +79,28 @@ import Text.PrettyPrint
89 | import Network | 79 | import Network |
90 | import Network.Socket.ByteString | 80 | import Network.Socket.ByteString |
91 | 81 | ||
92 | import Data.Bitfield | 82 | import Data.Torrent.Bitfield |
93 | import Data.Torrent | 83 | import Data.Torrent.Block |
84 | import Data.Torrent.Metainfo | ||
94 | import Network.BitTorrent.Extension | 85 | import Network.BitTorrent.Extension |
95 | import Network.BitTorrent.Peer | 86 | import Network.BitTorrent.Peer |
96 | 87 | ||
97 | 88 | ||
89 | getInt :: S.Get Int | ||
90 | getInt = fromIntegral <$> S.getWord32be | ||
91 | {-# INLINE getInt #-} | ||
92 | |||
93 | putInt :: S.Putter Int | ||
94 | putInt = S.putWord32be . fromIntegral | ||
95 | {-# INLINE putInt #-} | ||
96 | |||
97 | getIntB :: B.Get Int | ||
98 | getIntB = fromIntegral <$> B.getWord32be | ||
99 | {-# INLINE getIntB #-} | ||
100 | |||
101 | putIntB :: Int -> B.Put | ||
102 | putIntB = B.putWord32be . fromIntegral | ||
103 | {-# INLINE putIntB #-} | ||
98 | 104 | ||
99 | {----------------------------------------------------------------------- | 105 | {----------------------------------------------------------------------- |
100 | Handshake | 106 | Handshake |
@@ -196,128 +202,6 @@ handshake sock hs = do
196 | return hs' | 202 | return hs' |
197 | 203 | ||
198 | {----------------------------------------------------------------------- | 204 | {----------------------------------------------------------------------- |
199 | Block Index | ||
200 | -----------------------------------------------------------------------} | ||
201 | |||
202 | type BlockLIx = Int | ||
203 | type PieceLIx = Int | ||
204 | |||
205 | |||
206 | data BlockIx = BlockIx { | ||
207 | -- | Zero-based piece index. | ||
208 | ixPiece :: {-# UNPACK #-} !PieceLIx | ||
209 | |||
210 | -- | Zero-based byte offset within the piece. | ||
211 | , ixOffset :: {-# UNPACK #-} !Int | ||
212 | |||
213 | -- | Block size starting from offset. | ||
214 | , ixLength :: {-# UNPACK #-} !Int | ||
215 | } deriving (Show, Eq) | ||
216 | |||
217 | $(deriveJSON (L.map toLower . L.dropWhile isLower) ''BlockIx) | ||
218 | |||
219 | getInt :: S.Get Int | ||
220 | getInt = fromIntegral <$> S.getWord32be | ||
221 | {-# INLINE getInt #-} | ||
222 | |||
223 | putInt :: S.Putter Int | ||
224 | putInt = S.putWord32be . fromIntegral | ||
225 | {-# INLINE putInt #-} | ||
226 | |||
227 | getIntB :: B.Get Int | ||
228 | getIntB = fromIntegral <$> B.getWord32be | ||
229 | {-# INLINE getIntB #-} | ||
230 | |||
231 | putIntB :: Int -> B.Put | ||
232 | putIntB = B.putWord32be . fromIntegral | ||
233 | {-# INLINE putIntB #-} | ||
234 | |||
235 | instance Serialize BlockIx where | ||
236 | {-# SPECIALIZE instance Serialize BlockIx #-} | ||
237 | get = BlockIx <$> getInt <*> getInt <*> getInt | ||
238 | {-# INLINE get #-} | ||
239 | |||
240 | put i = do putInt (ixPiece i) | ||
241 | putInt (ixOffset i) | ||
242 | putInt (ixLength i) | ||
243 | {-# INLINE put #-} | ||
244 | |||
245 | instance Binary BlockIx where | ||
246 | {-# SPECIALIZE instance Binary BlockIx #-} | ||
247 | get = BlockIx <$> getIntB <*> getIntB <*> getIntB | ||
248 | {-# INLINE get #-} | ||
249 | |||
250 | put BlockIx {..} = do | ||
251 | putIntB ixPiece | ||
252 | putIntB ixOffset | ||
253 | putIntB ixLength | ||
254 | |||
255 | -- | Format block index in human readable form. | ||
256 | ppBlockIx :: BlockIx -> Doc | ||
257 | ppBlockIx BlockIx {..} = | ||
258 | "piece = " <> int ixPiece <> "," <+> | ||
259 | "offset = " <> int ixOffset <> "," <+> | ||
260 | "length = " <> int ixLength | ||
261 | |||
262 | {----------------------------------------------------------------------- | ||
263 | Block | ||
264 | -----------------------------------------------------------------------} | ||
265 | |||
266 | data Block = Block { | ||
267 | -- | Zero-based piece index. | ||
268 | blkPiece :: {-# UNPACK #-} !PieceLIx | ||
269 | |||
270 | -- | Zero-based byte offset within the piece. | ||
271 | , blkOffset :: {-# UNPACK #-} !Int | ||
272 | |||
273 | -- | Payload. | ||
274 | , blkData :: !Lazy.ByteString | ||
275 | } deriving (Show, Eq) | ||
276 | |||
277 | -- | Format block in human readable form. The payload is omitted. | ||
278 | ppBlock :: Block -> Doc | ||
279 | ppBlock = ppBlockIx . blockIx | ||
280 | |||
281 | blockSize :: Block -> Int | ||
282 | blockSize blk = fromIntegral (Lazy.length (blkData blk)) | ||
283 | {-# INLINE blockSize #-} | ||
284 | |||
285 | -- | Widely used semi-official block size. | ||
286 | defaultBlockSize :: Int | ||
287 | defaultBlockSize = 16 * 1024 | ||
288 | |||
289 | |||
290 | isPiece :: Int -> Block -> Bool | ||
291 | isPiece pieceSize (Block i offset bs) = | ||
292 | offset == 0 | ||
293 | && fromIntegral (Lazy.length bs) == pieceSize | ||
294 | && i >= 0 | ||
295 | {-# INLINE isPiece #-} | ||
296 | |||
297 | pieceIx :: Int -> Int -> BlockIx | ||
298 | pieceIx i = BlockIx i 0 | ||
299 | {-# INLINE pieceIx #-} | ||
300 | |||
301 | blockIx :: Block -> BlockIx | ||
302 | blockIx = BlockIx <$> blkPiece <*> blkOffset <*> blockSize | ||
303 | |||
304 | blockRange :: (Num a, Integral a) => Int -> Block -> (a, a) | ||
305 | blockRange pieceSize blk = (offset, offset + len) | ||
306 | where | ||
307 | offset = fromIntegral pieceSize * fromIntegral (blkPiece blk) | ||
308 | + fromIntegral (blkOffset blk) | ||
309 | len = fromIntegral (Lazy.length (blkData blk)) | ||
310 | {-# INLINE blockRange #-} | ||
311 | |||
312 | ixRange :: (Num a, Integral a) => Int -> BlockIx -> (a, a) | ||
313 | ixRange pieceSize i = (offset, offset + len) | ||
314 | where | ||
315 | offset = fromIntegral pieceSize * fromIntegral (ixPiece i) | ||
316 | + fromIntegral (ixOffset i) | ||
317 | len = fromIntegral (ixLength i) | ||
318 | {-# INLINE ixRange #-} | ||
319 | |||
320 | {----------------------------------------------------------------------- | ||
321 | Regular messages | 205 | Regular messages |
322 | -----------------------------------------------------------------------} | 206 | -----------------------------------------------------------------------} |
323 | 207 | ||
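The getInt/putInt helpers hoisted to the top of this module (hunk above) frame integers as 4-byte big-endian words, which is how lengths and block fields travel on the peer wire. Below is a self-contained sketch of that framing using Data.Serialize directly; the 16 KiB value mirrors the defaultBlockSize that moved to Data.Torrent.Block.

module Main (main) where

import qualified Data.ByteString as B
import Data.Serialize (Get, Putter, getWord32be, putWord32be, runGet, runPut)

-- Same shape as the helpers kept in Exchange.Protocol.
getInt :: Get Int
getInt = fromIntegral <$> getWord32be

putInt :: Putter Int
putInt = putWord32be . fromIntegral

main :: IO ()
main = do
  let bytes = runPut (putInt (16 * 1024))   -- encode the 16 KiB block size
  print (B.unpack bytes)                    -- [0,0,64,0]
  print (runGet getInt bytes)               -- Right 16384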
diff --git a/src/Network/BitTorrent/Sessions.hs b/src/Network/BitTorrent/Sessions.hs
index 1d0d21b4..ba0c60a1 100644
--- a/src/Network/BitTorrent/Sessions.hs
+++ b/src/Network/BitTorrent/Sessions.hs
@@ -73,8 +73,8 @@ import Network hiding (accept)
73 | import Network.BSD | 73 | import Network.BSD |
74 | import Network.Socket | 74 | import Network.Socket |
75 | 75 | ||
76 | import Data.Bitfield as BF | 76 | import Data.Torrent.Bitfield as BF |
77 | import Data.Torrent | 77 | import Data.Torrent.Metainfo |
78 | import Network.BitTorrent.Extension | 78 | import Network.BitTorrent.Extension |
79 | import Network.BitTorrent.Peer | 79 | import Network.BitTorrent.Peer |
80 | import Network.BitTorrent.Sessions.Types | 80 | import Network.BitTorrent.Sessions.Types |
diff --git a/src/Network/BitTorrent/Sessions/Types.lhs b/src/Network/BitTorrent/Sessions/Types.lhs
index 5571e23a..e62e362f 100644
--- a/src/Network/BitTorrent/Sessions/Types.lhs
+++ b/src/Network/BitTorrent/Sessions/Types.lhs
@@ -59,8 +59,8 @@
59 | 59 | ||
60 | > import Network | 60 | > import Network |
61 | 61 | ||
62 | > import Data.Bitfield as BF | 62 | > import Data.Torrent.Bitfield as BF |
63 | > import Data.Torrent | 63 | > import Data.Torrent.Metainfo |
64 | > import Network.BitTorrent.Extension | 64 | > import Network.BitTorrent.Extension |
65 | > import Network.BitTorrent.Peer | 65 | > import Network.BitTorrent.Peer |
66 | > import Network.BitTorrent.Exchange.Protocol as BT | 66 | > import Network.BitTorrent.Exchange.Protocol as BT |
@@ -248,7 +248,7 @@ fresh required extensions.
248 | Normally, you would have one client session, however, if we needed, in | 248 | Normally, you would have one client session, however, if we needed, in |
249 | one application we could have many clients with different peer ID's | 249 | one application we could have many clients with different peer ID's |
250 | and different enabled extensions at the same time. | 250 | and different enabled extensions at the same time. |
251 | 251 | ||
252 | > -- | | 252 | > -- | |
253 | > data ClientSession = ClientSession { | 253 | > data ClientSession = ClientSession { |
254 | > -- | Used in handshakes and discovery mechanism. | 254 | > -- | Used in handshakes and discovery mechanism. |
diff --git a/src/Network/BitTorrent/Tracker.hs b/src/Network/BitTorrent/Tracker.hs
index e1e6ea71..e98f1e94 100644
--- a/src/Network/BitTorrent/Tracker.hs
+++ b/src/Network/BitTorrent/Tracker.hs
@@ -39,7 +39,7 @@ import Data.IORef
39 | import Network | 39 | import Network |
40 | import Network.URI | 40 | import Network.URI |
41 | 41 | ||
42 | import Data.Torrent | 42 | import Data.Torrent.Metainfo |
43 | import Network.BitTorrent.Peer | 43 | import Network.BitTorrent.Peer |
44 | import Network.BitTorrent.Sessions.Types | 44 | import Network.BitTorrent.Sessions.Types |
45 | import Network.BitTorrent.Tracker.Protocol | 45 | import Network.BitTorrent.Tracker.Protocol |
diff --git a/src/Network/BitTorrent/Tracker/HTTP.hs b/src/Network/BitTorrent/Tracker/HTTP.hs
index 0ada154b..f781b847 100644
--- a/src/Network/BitTorrent/Tracker/HTTP.hs
+++ b/src/Network/BitTorrent/Tracker/HTTP.hs
@@ -30,7 +30,7 @@ import Data.URLEncoded as URL
30 | import Network.URI | 30 | import Network.URI |
31 | import Network.HTTP | 31 | import Network.HTTP |
32 | 32 | ||
33 | import Data.Torrent | 33 | import Data.Torrent.Metainfo |
34 | import Network.BitTorrent.Tracker.Protocol | 34 | import Network.BitTorrent.Tracker.Protocol |
35 | 35 | ||
36 | {----------------------------------------------------------------------- | 36 | {----------------------------------------------------------------------- |
diff --git a/src/Network/BitTorrent/Tracker/Protocol.hs b/src/Network/BitTorrent/Tracker/Protocol.hs
index ee395883..c468656f 100644
--- a/src/Network/BitTorrent/Tracker/Protocol.hs
+++ b/src/Network/BitTorrent/Tracker/Protocol.hs
@@ -44,7 +44,7 @@ import Data.Text (Text)
44 | import Data.Text.Encoding | 44 | import Data.Text.Encoding |
45 | import Data.Serialize hiding (Result) | 45 | import Data.Serialize hiding (Result) |
46 | import Data.URLEncoded as URL | 46 | import Data.URLEncoded as URL |
47 | import Data.Torrent | 47 | import Data.Torrent.Metainfo |
48 | 48 | ||
49 | import Network | 49 | import Network |
50 | import Network.Socket | 50 | import Network.Socket |
diff --git a/src/Network/BitTorrent/Tracker/UDP.hs b/src/Network/BitTorrent/Tracker/UDP.hs
index 43de7663..13e1298b 100644
--- a/src/Network/BitTorrent/Tracker/UDP.hs
+++ b/src/Network/BitTorrent/Tracker/UDP.hs
@@ -24,7 +24,7 @@ import Data.Text.Encoding
24 | import Network.Socket hiding (Connected) | 24 | import Network.Socket hiding (Connected) |
25 | import Network.Socket.ByteString as BS | 25 | import Network.Socket.ByteString as BS |
26 | 26 | ||
27 | import Data.Torrent () | 27 | import Data.Torrent.Metainfo () |
28 | import Network.BitTorrent.Tracker.Protocol | 28 | import Network.BitTorrent.Tracker.Protocol |
29 | 29 | ||
30 | 30 | ||
diff --git a/src/System/IO/MMap/Fixed.hs b/src/System/IO/MMap/Fixed.hs
deleted file mode 100644
index 1e83c350..00000000
--- a/src/System/IO/MMap/Fixed.hs
+++ /dev/null
@@ -1,212 +0,0 @@
1 | -- TODO pprint | ||
2 | -- TODO see if this IntervalMap is overkill: Interval dataty have 4 constrs | ||
3 | -- TODO clarify lifetime in docs | ||
4 | -- TODO use madvise | ||
5 | -- TODO unmap selected interval | ||
6 | -- TODO tests | ||
7 | -- TODO benchmarks | ||
8 | -- TODO unmap overlapped regions | ||
9 | -- [A] TODO lazy mapping for 32 bit arch; | ||
10 | -- we need tricky algorithm and a lot of perf tests | ||
11 | -- TODO use memmove in write bytes | ||
12 | -- TODO write elem, write byte, read byte | ||
13 | -- | | ||
14 | -- Copyright : (c) Sam T. 2013 | ||
15 | -- License : MIT | ||
16 | -- Maintainer : pxqr.sta@gmail.com | ||
17 | -- Stability : experimental | ||
18 | -- Portability : portable | ||
19 | -- | ||
20 | -- This library provides a mechanism to mmap files to a fixed address | ||
21 | -- with fine-grained control. However, instead of using MAP_FIXED we | ||
22 | -- create our own address space on top of the virtual address space. If | ||
23 | -- you like, you could call this space a "fixed address space". | ||
24 | -- | ||
25 | -- This solves a few problems: | ||
26 | -- | ||
27 | -- * Page already in use. If you mmap one file at addresses 0..x and | ||
28 | -- want to map a second file to addresses x..y using MAP_FIXED you | ||
29 | -- can get into trouble: the page might be mapped already. A raw call | ||
30 | -- to mmap will silently unmap addresses x..y and then mmap our second | ||
31 | -- file. So here we have an extra unmap we would like to avoid. | ||
32 | -- | ||
33 | -- * Page boundaries. If you mmap one file at x..x+1 you cannot | ||
34 | -- map the next file to, say, addresses x+1..x+2. | ||
35 | -- | ||
36 | -- Internally we make an ordinary call to mmap to map a file and then, | ||
37 | -- using an /interval map/, we map the fixed address space to the | ||
38 | -- virtual address space. It takes TODO time in TODO cases. | ||
39 | -- | ||
40 | -- Basically this library could be used when we need to coalesce | ||
41 | -- several files in an arbitrary way. We can map at any position as | ||
42 | -- long as offset + size fits in 'Int'. | ||
43 | -- | ||
44 | -- For other details see: | ||
45 | -- | ||
46 | -- > http://hackage.haskell.org/package/mmap | ||
47 | -- > man mmap | ||
48 | -- | ||
49 | {-# LANGUAGE RecordWildCards #-} | ||
50 | module System.IO.MMap.Fixed | ||
51 | ( -- * Intervals | ||
52 | FixedOffset, FileOffset, FixedInterval, FileInterval | ||
53 | , interval, fileInterval | ||
54 | |||
55 | -- * Construction | ||
56 | , Fixed, Bytes | ||
57 | , System.IO.MMap.Fixed.empty, insertTo | ||
58 | , coalesceFiles | ||
59 | |||
60 | -- ** Specialized 'insertTo' | ||
61 | , mmapTo, mallocTo | ||
62 | , lookupRegion | ||
63 | |||
64 | -- * Query | ||
65 | , upperAddr | ||
66 | |||
67 | -- * Access | ||
68 | , viewBytes, readBytes, writeBytes | ||
69 | , readElem, writeElem | ||
70 | ) where | ||
71 | |||
72 | import Data.ByteString.Lazy as Lazy | ||
73 | import Data.ByteString.Lazy.Internal as Lazy | ||
74 | import Data.ByteString.Internal as B | ||
75 | import Data.List as L | ||
76 | import Data.Int | ||
77 | import Data.IntervalMap.Strict as M | ||
78 | import Data.IntervalMap.Interval | ||
79 | import System.IO.MMap | ||
80 | import Foreign | ||
81 | |||
82 | |||
83 | type FixedOffset = Int | ||
84 | type FileOffset = Int64 | ||
85 | type Size = Int | ||
86 | |||
87 | |||
88 | type FileInterval = (FileOffset, Size) | ||
89 | type FixedInterval = Interval FixedOffset | ||
90 | |||
91 | |||
92 | interval :: FixedOffset -> Size -> FixedInterval | ||
93 | interval off s = IntervalCO off (off + fromIntegral (max 0 s)) | ||
94 | {-# INLINE interval #-} | ||
95 | |||
96 | fileInterval :: FileOffset -> Size -> FileInterval | ||
97 | fileInterval off s = (off, s) | ||
98 | {-# INLINE fileInterval #-} | ||
99 | |||
100 | intervalSize :: FixedInterval -> Size | ||
101 | intervalSize i = upperBound i - lowerBound i | ||
102 | {-# INLINE intervalSize #-} | ||
103 | |||
104 | |||
105 | type Bytes = (ForeignPtr Word8, Size) | ||
106 | |||
107 | type FixedMap = IntervalMap FixedOffset Bytes | ||
108 | |||
109 | newtype Fixed = Fixed { imap :: FixedMap } | ||
110 | |||
111 | instance Show Fixed where | ||
112 | show = show . M.toList . imap | ||
113 | |||
114 | |||
115 | mapIM :: (FixedMap -> FixedMap) -> Fixed -> Fixed | ||
116 | mapIM f s = s { imap = f (imap s) } | ||
117 | |||
118 | empty :: Fixed | ||
119 | empty = Fixed M.empty | ||
120 | |||
121 | coalesceFiles :: [(FilePath, Int)] -> IO Fixed | ||
122 | coalesceFiles = go 0 System.IO.MMap.Fixed.empty | ||
123 | where | ||
124 | go _ s [] = return s | ||
125 | go offset s ((path, bsize) : xs) = do | ||
126 | s' <- mmapTo path (0, bsize) offset s | ||
127 | go (offset + bsize) s' xs | ||
128 | |||
129 | upperAddr :: Fixed -> FixedOffset | ||
130 | upperAddr = upperBound . fst . findLast . imap | ||
131 | |||
132 | insertTo :: FixedInterval -> Bytes -> Fixed -> Fixed | ||
133 | insertTo fi mm = mapIM (M.insert fi mm) | ||
134 | {-# INLINE insertTo #-} | ||
135 | |||
136 | mmapTo :: FilePath -> FileInterval -> FixedOffset -> Fixed -> IO Fixed | ||
137 | mmapTo path mrange to s = do | ||
138 | (fptr, offset, fsize) <- mmapFileForeignPtr path ReadWriteEx (Just mrange) | ||
139 | |||
140 | let fixed = interval to fsize | ||
141 | let mmaped = (fptr, offset) | ||
142 | |||
143 | return $ insertTo fixed mmaped s | ||
144 | |||
145 | mallocTo :: FixedInterval -> Fixed -> IO Fixed | ||
146 | mallocTo fi s = do | ||
147 | let bsize = intervalSize fi | ||
148 | fptr <- mallocForeignPtrBytes bsize | ||
149 | return (insertTo fi (fptr, 0) s) | ||
150 | |||
151 | lookupRegion :: FixedOffset -> Fixed -> Maybe B.ByteString | ||
152 | lookupRegion offset Fixed {..} = | ||
153 | case intersecting imap $ IntervalCO offset (succ offset) of | ||
154 | [(i, (fptr, off))] -> let s = upperBound i - lowerBound i | ||
155 | in Just $ fromForeignPtr fptr off (max 0 s) | ||
156 | _ -> Nothing | ||
157 | |||
158 | -- | Note: this is an unsafe operation. | ||
159 | viewBytes :: FixedInterval -> Fixed -> Lazy.ByteString | ||
160 | viewBytes fi s = fromChunks $ L.map mk $ (imap s `intersecting` fi) | ||
161 | where | ||
162 | mk (i, (fptr, offset)) = | ||
163 | let dropB = max 0 (lowerBound fi - lowerBound i) | ||
164 | dropT = max 0 (upperBound i - upperBound fi) | ||
165 | bsize = intervalSize i - (dropT + dropB) | ||
166 | in fromForeignPtr fptr (offset + dropB) bsize | ||
167 | |||
168 | |||
169 | readBytes :: FixedInterval -> Fixed -> IO Lazy.ByteString | ||
170 | readBytes fi s = let c = Lazy.copy (viewBytes fi s) in mkCopy c >> return c | ||
171 | {-# INLINE readBytes #-} | ||
172 | |||
173 | writeBytes :: FixedInterval -> Lazy.ByteString -> Fixed -> IO () | ||
174 | writeBytes fi bs s = bscpy (viewBytes fi s) bs | ||
175 | {-# INLINE writeBytes #-} | ||
176 | |||
177 | -- | Note: this operation takes O(log(files count)) time, if possible | ||
178 | -- use readBytes. | ||
179 | readElem :: Storable a => Fixed -> FixedOffset -> IO a | ||
180 | readElem s offset = go undefined | ||
181 | where | ||
182 | go :: Storable a => a -> IO a | ||
183 | go dont_touch = do | ||
184 | let bsize = sizeOf dont_touch | ||
185 | let PS fptr off _ = Lazy.toStrict (viewBytes (interval offset bsize) s) | ||
186 | withForeignPtr fptr $ \ ptr -> peekByteOff ptr off | ||
187 | |||
188 | writeElem :: Storable a => Fixed -> FixedOffset -> a -> IO () | ||
189 | writeElem s offset x = do | ||
190 | let bsize = sizeOf x | ||
191 | let PS fptr off _ = Lazy.toStrict (viewBytes (interval offset bsize) s) | ||
192 | withForeignPtr fptr $ \ptr -> pokeByteOff ptr off x | ||
193 | |||
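-- A small illustration of the element interface, assuming a mapping @s@
-- built as above that is at least 8 bytes long ('Word32' comes from
-- Data.Word): write a value at byte offset 4 and read it back.
--
-- > do writeElem s 4 (0xdeadbeef :: Word32)
-- >    w <- readElem s 4 :: IO Word32
-- >    print w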
194 | |||
195 | mkCopy :: Lazy.ByteString -> IO () | ||
196 | mkCopy Empty = return () | ||
197 | mkCopy (Chunk _ x) = mkCopy x | ||
198 | |||
199 | bscpy :: Lazy.ByteString -> Lazy.ByteString -> IO () | ||
200 | bscpy (PS _ _ 0 `Chunk` dest_rest) src = bscpy dest_rest src | ||
201 | bscpy dest (PS _ _ 0 `Chunk` src_rest) = bscpy dest src_rest | ||
202 | bscpy (PS dest_fptr dest_off dest_size `Chunk` dest_rest) | ||
203 | (PS src_fptr src_off src_size `Chunk` src_rest) | ||
204 | = do let csize = min dest_size src_size | ||
205 | withForeignPtr dest_fptr $ \dest_ptr -> | ||
206 | withForeignPtr src_fptr $ \src_ptr -> | ||
207 | memcpy (dest_ptr `advancePtr` dest_off) | ||
208 | (src_ptr `advancePtr` src_off) | ||
209 | (fromIntegral csize) -- TODO memmove? | ||
210 | bscpy (PS dest_fptr (dest_off + csize) (dest_size - csize) `Chunk` dest_rest) | ||
211 | (PS src_fptr (src_off + csize) (src_size - csize) `Chunk` src_rest) | ||
212 | bscpy _ _ = return () \ No newline at end of file | ||
diff --git a/src/System/Torrent/Storage.hs b/src/System/Torrent/Storage.hs deleted file mode 100644 index 99d164f2..00000000 --- a/src/System/Torrent/Storage.hs +++ /dev/null | |||
@@ -1,332 +0,0 @@ | |||
1 | -- | | ||
2 | -- Copyright : (c) Sam T. 2013 | ||
3 | -- License : MIT | ||
4 | -- Maintainer : pxqr.sta@gmail.com | ||
5 | -- Stability : experimental | ||
6 | -- Portability : non-portable | ||
7 | -- | ||
8 | -- This module implements a mapping from a single contiguous block space | ||
9 | -- to file storage. Storage can be used in two modes: | ||
10 | -- | ||
11 | -- * As in-memory storage - in this case we don't touch the filesystem. | ||
12 | -- | ||
13 | -- * As ordinary mmapped file storage - when we need to store | ||
14 | -- data in the filesystem. | ||
15 | -- | ||
16 | {-# LANGUAGE DoAndIfThenElse #-} | ||
17 | {-# LANGUAGE NamedFieldPuns #-} | ||
18 | {-# LANGUAGE RecordWildCards #-} | ||
19 | module System.Torrent.Storage | ||
20 | ( Storage | ||
21 | , ppStorage | ||
22 | |||
23 | -- * Construction | ||
24 | , openStorage, closeStorage, withStorage | ||
25 | , getCompleteBitfield | ||
26 | |||
27 | -- * Modification | ||
28 | , getBlk, putBlk, selBlk | ||
29 | |||
30 | -- * TODO expose only File interface! | ||
31 | -- * File interface | ||
32 | , FD | ||
33 | , openFD, flushFD, closeFD | ||
34 | , readFD, writeFD | ||
35 | ) where | ||
36 | |||
37 | import Control.Applicative | ||
38 | import Control.Concurrent.STM | ||
39 | import Control.Exception | ||
40 | import Control.Monad | ||
41 | import Control.Monad.Trans | ||
42 | |||
43 | import Data.ByteString as B | ||
44 | import qualified Data.ByteString.Lazy as Lazy | ||
45 | import Text.PrettyPrint | ||
46 | import System.FilePath | ||
47 | import System.Directory | ||
48 | import Foreign.C.Error | ||
49 | |||
50 | import Data.Bitfield as BF | ||
51 | import Data.Torrent | ||
52 | import Network.BitTorrent.Exchange.Protocol | ||
53 | import System.IO.MMap.Fixed as Fixed | ||
54 | |||
55 | -- TODO merge piece validation and Sessions.available into one transaction. | ||
56 | data Storage = Storage { | ||
57 | -- | Torrent metainfo. | ||
58 | metainfo :: !Torrent | ||
59 | |||
60 | -- | Bitmask of complete and verified _pieces_. | ||
61 | , complete :: !(TVar Bitfield) | ||
62 | |||
63 | -- | Bitmask of complete _blocks_. | ||
64 | , blocks :: !(TVar Bitfield) | ||
65 | -- TODO use bytestring for fast serialization | ||
66 | -- because we need to write this bitmap to disc periodically | ||
67 | |||
68 | , blockSize :: !Int | ||
69 | |||
70 | -- | Used to map linear block addresses to disjoint | ||
71 | -- malloc'd/mmapped addresses. | ||
72 | , payload :: !Fixed | ||
73 | } | ||
74 | |||
75 | ppStorage :: Storage -> IO Doc | ||
76 | ppStorage Storage {..} = pp <$> readTVarIO blocks | ||
77 | where | ||
78 | pp bf = int blockSize | ||
79 | |||
80 | getCompleteBitfield :: Storage -> STM Bitfield | ||
81 | getCompleteBitfield Storage {..} = readTVar complete | ||
82 | |||
83 | {----------------------------------------------------------------------- | ||
84 | Construction | ||
85 | -----------------------------------------------------------------------} | ||
86 | |||
87 | -- TODO doc args | ||
88 | openStorage :: Torrent -> FilePath -> Bitfield -> IO Storage | ||
89 | openStorage t @ Torrent {..} contentPath bf = do | ||
90 | let content_paths = contentLayout contentPath tInfo | ||
91 | mapM_ (mkDir . fst) content_paths | ||
92 | |||
93 | let blockSize = defaultBlockSize `min` ciPieceLength tInfo | ||
94 | print $ "content length " ++ show (contentLength tInfo) | ||
95 | Storage t <$> newTVarIO bf | ||
96 | <*> newTVarIO (haveNone (blockCount blockSize tInfo)) | ||
97 | <*> pure blockSize | ||
98 | <*> coalesceFiles content_paths | ||
99 | where | ||
100 | mkDir path = do | ||
101 | let dirPath = fst (splitFileName path) | ||
102 | exist <- doesDirectoryExist dirPath | ||
103 | unless exist $ do | ||
104 | createDirectoryIfMissing True dirPath | ||
105 | |||
106 | -- TODO | ||
107 | closeStorage :: Storage -> IO () | ||
108 | closeStorage st = return () | ||
109 | |||
110 | |||
111 | withStorage :: Torrent -> FilePath -> Bitfield -> (Storage -> IO a) -> IO a | ||
112 | withStorage se path bf = bracket (openStorage se path bf) closeStorage | ||
113 | |||
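-- A hedged usage sketch; 'loadTorrent' and 'startBitfield' are hypothetical
-- helpers standing in for however the caller obtains the metainfo and the
-- initial piece bitfield ('haveCount' is from Data.Bitfield):
--
-- > do t  <- loadTorrent "example.torrent"
-- >    bf <- startBitfield t
-- >    withStorage t "downloads" bf $ \ st ->
-- >      atomically (getCompleteBitfield st) >>= print . haveCount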
114 | {----------------------------------------------------------------------- | ||
115 | Modification | ||
116 | -----------------------------------------------------------------------} | ||
117 | |||
118 | -- TODO to avoid races we might need to try Control.Concurrent.yield | ||
119 | -- TODO make block_payload :: Lazy.ByteString | ||
120 | |||
121 | selBlk :: MonadIO m => PieceIx -> Storage -> m [BlockIx] | ||
122 | selBlk pix st @ Storage {..} | ||
123 | = liftIO $ {-# SCC selBlk #-} atomically $ do | ||
124 | mask <- pieceMask pix st | ||
125 | select mask <$> readTVar blocks | ||
126 | where | ||
127 | select mask = fmap mkBix . toList . difference mask | ||
128 | -- TODO clip upper bound of block index | ||
129 | mkBix ix = BlockIx pix (blockSize * (ix - offset)) blockSize | ||
130 | |||
131 | offset = coeff * pix | ||
132 | coeff = ciPieceLength (tInfo metainfo) `div` blockSize | ||
133 | |||
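-- For instance (numbers are illustrative), for a 'Storage' value @st@ with
-- 16 blocks per piece and no blocks of piece 2 stored yet, the call below
-- yields requests covering offsets 0, 16384, .. 245760 within piece 2.
--
-- > do bixs <- selBlk 2 st
-- >    print (length bixs)   -- 16 in this example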
134 | -- | ||
135 | -- TODO make global lock map -- otherwise we might get broken pieces | ||
136 | -- | ||
137 | -- imagine the following situation: | ||
138 | -- | ||
139 | -- thread1: write | ||
140 | -- thread1: mark | ||
141 | -- | ||
142 | -- this lets us avoid races as well | ||
143 | -- | ||
144 | |||
145 | -- | Write a block to the storage. If the block is out of range, it is clipped. | ||
146 | -- | ||
147 | -- | ||
148 | -- | ||
149 | putBlk :: MonadIO m => Block -> Storage -> m Bool | ||
150 | putBlk blk @ Block {..} st @ Storage {..} | ||
151 | = liftIO $ {-# SCC putBlk #-} do | ||
152 | -- let blkIx = undefined | ||
153 | -- bm <- readTVarIO blocks | ||
154 | -- unless (member blkIx bm) $ do | ||
155 | writeBytes (blkInterval (ciPieceLength (tInfo metainfo)) blk) blkData payload | ||
156 | |||
157 | markBlock blk st | ||
158 | validatePiece blkPiece st | ||
159 | |||
160 | markBlock :: Block -> Storage -> IO () | ||
161 | markBlock Block {..} Storage {..} = {-# SCC markBlock #-} do | ||
162 | let piLen = ciPieceLength (tInfo metainfo) | ||
163 | let glIx = (piLen `div` blockSize) * blkPiece + (blkOffset `div` blockSize) | ||
164 | atomically $ modifyTVar' blocks (have glIx) | ||
165 | |||
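-- A worked example of the global index above (sizes are illustrative): with
-- piLen = 262144 and blockSize = 16384 there are 16 blocks per piece, so a
-- block at blkPiece = 3, blkOffset = 32768 is marked at global index
-- (262144 `div` 16384) * 3 + (32768 `div` 16384) = 16 * 3 + 2 = 50.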
166 | -- | Read a block by the given block index. If the lower or upper bound | ||
167 | -- is out of range, the index is clipped. | ||
168 | -- | ||
169 | -- Does not block. | ||
170 | -- | ||
171 | getBlk :: MonadIO m => BlockIx -> Storage -> m Block | ||
172 | getBlk ix @ BlockIx {..} st @ Storage {..} | ||
173 | = liftIO $ {-# SCC getBlk #-} do | ||
174 | -- TODO check if __piece__ is available | ||
175 | let piLen = ciPieceLength (tInfo metainfo) | ||
176 | bs <- readBytes (ixInterval piLen ix) payload | ||
177 | return $ Block ixPiece ixOffset bs | ||
178 | |||
179 | getPiece :: PieceIx -> Storage -> IO ByteString | ||
180 | getPiece pix st @ Storage {..} = {-# SCC getPiece #-} do | ||
181 | let piLen = ciPieceLength (tInfo metainfo) | ||
182 | let bix = BlockIx pix 0 piLen | ||
183 | let bs = viewBytes (ixInterval piLen bix) payload | ||
184 | return $! Lazy.toStrict bs | ||
185 | |||
186 | resetPiece :: PieceIx -> Storage -> IO () | ||
187 | resetPiece pix st @ Storage {..} | ||
188 | = {-# SCC resetPiece #-} atomically $ do | ||
189 | mask <- pieceMask pix st | ||
190 | modifyTVar' blocks (`difference` mask) | ||
191 | |||
192 | validatePiece :: PieceIx -> Storage -> IO Bool | ||
193 | validatePiece pix st @ Storage {..} = {-# SCC validatePiece #-} do | ||
194 | downloaded <- atomically $ isDownloaded pix st | ||
195 | if not downloaded then return False | ||
196 | else do | ||
197 | piece <- getPiece pix st | ||
198 | if checkPiece (tInfo metainfo) pix piece | ||
199 | then do | ||
200 | atomically $ modifyTVar' complete (BF.have pix) | ||
201 | return True | ||
202 | else do | ||
203 | print $ "----------------------------- invalid " ++ show pix | ||
204 | -- resetPiece pix st | ||
205 | return True | ||
206 | |||
207 | -- | Check each piece in the storage against the content info hash. | ||
208 | -- | ||
209 | -- Note that this function will block until the entire storage is | ||
210 | -- checked. This may take a long time for big torrents; use fork | ||
211 | -- if needed. | ||
212 | -- | ||
213 | validateStorage :: Storage -> IO () | ||
214 | validateStorage st = undefined -- (`validatePiece` st) [0..pieceCount st] | ||
215 | |||
216 | {----------------------------------------------------------------------- | ||
217 | POSIX-like file interface | ||
218 | ------------------------------------------------------------------------ | ||
219 | This is useful for virtual filesystem writers and also just as a per-file | ||
220 | interface. | ||
221 | -----------------------------------------------------------------------} | ||
222 | -- TODO reference counting: storage might be closed before all FDs | ||
223 | -- get closed! | ||
224 | -- Or we could forbid closing the storage and use finalizers only? | ||
225 | |||
226 | type Offset = Int | ||
227 | type Size = Int | ||
228 | |||
229 | data FD = FD { | ||
230 | fdData :: ByteString | ||
231 | , fdNoBlock :: Bool | ||
232 | } | ||
233 | |||
234 | |||
235 | -- TODO return "is dir" error | ||
236 | -- | This call corresponds to open(2) with the following parameters: | ||
237 | -- | ||
238 | -- * OpenMode = ReadOnly; | ||
239 | -- | ||
240 | -- * OpenFileFlags = O_NONBLOCK. (not true yet) | ||
241 | -- | ||
242 | openFD :: FilePath -> Bool -> Storage -> IO (Either Errno FD) | ||
243 | openFD path nonblock Storage {..} | ||
244 | | Just offset <- fileOffset path (tInfo metainfo) | ||
245 | , Just bs <- lookupRegion (fromIntegral offset) payload | ||
246 | = return $ Right $ FD bs nonblock | ||
247 | | otherwise = return $ Left $ eNOENT | ||
248 | |||
249 | -- | Cancel all enqueued read operations and report any delayed | ||
250 | -- errors. | ||
251 | flushFD :: FD -> IO Errno | ||
252 | flushFD _ = return eOK | ||
253 | |||
254 | -- | This call corresponds to close(2). | ||
255 | closeFD :: FD -> IO () | ||
256 | closeFD _ = return () | ||
257 | |||
258 | -- TODO | ||
259 | maskRegion :: FD -> Offset -> Size -> Maybe Size | ||
260 | maskRegion FD {..} offset siz = return siz | ||
261 | |||
262 | -- TODO | ||
263 | isComplete :: FD -> Offset -> Size -> IO Size | ||
264 | isComplete _ _ siz = return siz | ||
265 | |||
266 | -- TODO | ||
267 | enqueueRead :: FD -> Offset -> Size -> IO () | ||
268 | enqueueRead _ _ _ = return () | ||
269 | |||
270 | -- TODO | ||
271 | readAhead :: FD -> Offset -> Size -> IO () | ||
272 | readAhead _ _ _ = return () | ||
273 | |||
274 | -- TODO | ||
275 | waitRegion :: FD -> Offset -> Size -> IO ByteString | ||
276 | waitRegion _ _ _ = return B.empty | ||
277 | |||
278 | -- TODO implement blocking and non blocking modes? | ||
279 | -- TODO check if region completely downloaded | ||
280 | -- TODO if not we could return EAGAIN | ||
281 | -- TODO enqueue read to piece manager | ||
282 | -- | This call corresponds to pread(2). | ||
283 | readFD :: FD -> Offset -> Size -> IO (Either Errno ByteString) | ||
284 | readFD fd @ FD {..} offset reqSize = | ||
285 | case maskRegion fd offset reqSize of | ||
286 | Nothing -> return $ Right B.empty | ||
287 | Just expSize -> do | ||
288 | availSize <- isComplete fd offset expSize | ||
289 | if availSize == expSize then haveAllReg expSize else haveSomeReg expSize | ||
290 | where | ||
291 | haveAllReg expSize = do | ||
292 | readAhead fd offset expSize | ||
293 | return $ Right $ slice offset expSize fdData | ||
294 | |||
295 | haveSomeReg expSize | ||
296 | | fdNoBlock = return $ Left $ eAGAIN | ||
297 | | otherwise = do | ||
298 | bs <- waitRegion fd offset expSize | ||
299 | readAhead fd offset expSize | ||
300 | return $ Right bs | ||
301 | |||
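-- A hedged sketch of the intended pread-style call pattern for a 'Storage'
-- value @st@ (the file path is hypothetical and must match an entry of the
-- torrent's content layout):
--
-- > do r <- openFD "dir/file.bin" True st
-- >    case r of
-- >      Left  _  -> putStrLn "no such file in this torrent"
-- >      Right fd -> do
-- >        Right bs <- readFD fd 0 4096
-- >        B.putStr bs
-- >        closeFD fd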
302 | -- TODO implement COW; needed for applications which want to change files. | ||
303 | writeFD :: FD -> ByteString -> Offset -> IO () | ||
304 | writeFD FD {..} bs offset = return () | ||
305 | |||
306 | {----------------------------------------------------------------------- | ||
307 | Internal | ||
308 | -----------------------------------------------------------------------} | ||
309 | |||
310 | isDownloaded :: PieceIx -> Storage -> STM Bool | ||
311 | isDownloaded pix st @ Storage {..} = do | ||
312 | bf <- readTVar blocks | ||
313 | mask <- pieceMask pix st | ||
314 | return $ intersection mask bf == mask | ||
315 | |||
316 | pieceMask :: PieceIx -> Storage -> STM Bitfield | ||
317 | pieceMask pix Storage {..} = do | ||
318 | bf <- readTVar blocks | ||
319 | return $ BF.interval (totalCount bf) offset (offset + coeff - 1) | ||
320 | where | ||
321 | offset = coeff * pix | ||
322 | coeff = ciPieceLength (tInfo metainfo) `div` blockSize | ||
323 | |||
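-- For example, with 16 blocks per piece (illustrative), @pieceMask 3 st@
-- selects the global block indices 48 .. 63.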
324 | |||
325 | ixInterval :: Int -> BlockIx -> FixedInterval | ||
326 | ixInterval pieceSize BlockIx {..} = | ||
327 | Fixed.interval (ixPiece * pieceSize + ixOffset) ixLength | ||
328 | |||
329 | blkInterval :: Int -> Block -> FixedInterval | ||
330 | blkInterval pieceSize Block {..} = | ||
331 | Fixed.interval (blkPiece * pieceSize + blkOffset) | ||
332 | (fromIntegral (Lazy.length blkData)) \ No newline at end of file | ||
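-- A worked example: with a piece size of 262144 bytes,
-- @ixInterval 262144 (BlockIx 3 16384 16384)@ addresses the fixed byte range
-- starting at 3 * 262144 + 16384 = 802816 with length 16384.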