-- | -- Copyright : (c) Sam Truzjan 2013 -- License : BSD3 -- Maintainer : pxqr.sta@gmail.com -- Stability : experimental -- Portability : portable -- -- Every node maintains a routing table of known good nodes. The -- nodes in the routing table are used as starting points for -- queries in the DHT. Nodes from the routing table are returned in -- response to queries from other nodes. -- -- For more info see: -- -- {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Network.BitTorrent.DHT.Routing ( -- * Table Table , Info(..) -- * Attributes , BucketCount , defaultBucketCount , BucketSize , defaultBucketSize , NodeCount -- * Query , Network.BitTorrent.DHT.Routing.null , Network.BitTorrent.DHT.Routing.full , thisId , shape , Network.BitTorrent.DHT.Routing.size , Network.BitTorrent.DHT.Routing.depth , compatibleNodeId -- * Lookup , K , defaultK , TableKey (..) , kclosest -- * Construction , Network.BitTorrent.DHT.Routing.nullTable , Event(..) , CheckPing(..) 
, Network.BitTorrent.DHT.Routing.insert -- * Conversion , Network.BitTorrent.DHT.Routing.TableEntry , Network.BitTorrent.DHT.Routing.toList -- * Routing , Timestamp , Routing , runRouting ) where import Control.Applicative as A import Control.Arrow import Control.Monad import Data.Function import Data.Functor.Identity import Data.List as L hiding (insert) import Data.Maybe import Data.Monoid import Data.PSQueue as PSQ import Data.Serialize as S hiding (Result, Done) import qualified Data.Sequence as Seq import Data.Time import Data.Time.Clock.POSIX import Data.Word import GHC.Generics import Text.PrettyPrint as PP hiding ((<>)) import Text.PrettyPrint.HughesPJClass (pPrint,Pretty) import qualified Data.ByteString as BS import Data.Bits import Data.Torrent import Network.BitTorrent.Address {----------------------------------------------------------------------- -- Routing monad -----------------------------------------------------------------------} -- | Last time the node was responding to our queries. -- -- Not all nodes that we learn about are equal. Some are \"good\" and -- some are not. Many nodes using the DHT are able to send queries -- and receive responses, but are not able to respond to queries -- from other nodes. It is important that each node's routing table -- must contain only known good nodes. A good node is a node has -- responded to one of our queries within the last 15 minutes. A -- node is also good if it has ever responded to one of our queries -- and has sent us a query within the last 15 minutes. After 15 -- minutes of inactivity, a node becomes questionable. Nodes become -- bad when they fail to respond to multiple queries in a row. Nodes -- that we know are good are given priority over nodes with unknown -- status. -- type Timestamp = POSIXTime -- | Some routing operations might need to perform additional IO. 
data Routing ip result
  = Full                        -- ^ no room left; the operation is aborted
  | Done result                 -- ^ finished with a result
  | GetTime (Timestamp -> Routing ip result)
                                -- ^ continue once the current time is known
  | NeedPing (NodeAddr ip) (Bool -> Routing ip result)
                                -- ^ continue once we know whether the node answers a ping
  | Refresh NodeId (Routing ip result)
                                -- ^ request a refresh for this id, then continue

instance Functor (Routing ip) where
  fmap _  Full              = Full
  fmap f (Done r)           = Done (f r)
  fmap f (GetTime cont)     = GetTime    (\ t    -> fmap f (cont t))
  fmap f (NeedPing a cont)  = NeedPing a (\ pong -> fmap f (cont pong))
  fmap f (Refresh n rest)   = Refresh n (fmap f rest)

instance Applicative (Routing ip) where
  pure  = Done
  (<*>) = ap

instance Monad (Routing ip) where
  return = pure
  Full            >>= _ = Full
  Done r          >>= k = k r
  GetTime cont    >>= k = GetTime    (\ t    -> cont t    >>= k)
  NeedPing a cont >>= k = NeedPing a (\ pong -> cont pong >>= k)
  Refresh n rest  >>= k = Refresh n (rest >>= k)

instance Alternative (Routing ip) where
  empty = Full
  Full            <|> m = m
  Done a          <|> _ = Done a
  GetTime cont    <|> m = GetTime    (\ t    -> cont t    <|> m)
  NeedPing a cont <|> m = NeedPing a (\ pong -> cont pong <|> m)
  Refresh n rest  <|> m = Refresh n (rest <|> m)

-- | Run routing table operation by interpreting each request with the
-- supplied actions.  'Full' maps to 'Nothing', 'Done' to 'Just'.
runRouting :: Monad m
           => (NodeAddr ip -> m Bool) -- ^ ping the specific node;
           -> (NodeId -> m ())        -- ^ refresh nodes;
           -> m Timestamp             -- ^ get current time;
           -> Routing ip f            -- ^ operation to run;
           -> m (Maybe f)             -- ^ operation result;
runRouting ping_node find_nodes timestamper = eval
  where
    eval  Full              = return Nothing
    eval (Done r)           = return (Just r)
    eval (GetTime cont)     = timestamper  >>= eval . cont
    eval (NeedPing a cont)  = ping_node a  >>= eval . cont
    eval (Refresh nid rest) = find_nodes nid >> eval rest

{-----------------------------------------------------------------------
    Bucket
-----------------------------------------------------------------------}

-- TODO: add replacement cache to the bucket
--
-- When a k-bucket is full and a new node is discovered for that
-- k-bucket, the least recently seen node in the k-bucket is
-- PINGed. If the node is found to be still alive, the new node is
-- place in a secondary list, a replacement cache. The replacement
-- cache is used only if a node in the k-bucket stops responding.
-- In other words: new nodes are used only when older nodes disappear.

-- | A node bound to the timestamp of the last time it was pinged.
type NodeEntry ip = Binding (NodeInfo ip) Timestamp

instance (Serialize k, Serialize v) => Serialize (Binding k v) where
  get = (:->) <$> get <*> get
  put (k :-> v) = do
    put k
    put v

-- TODO instance Pretty where

-- | Number of nodes in a bucket.
type BucketSize = Int

-- | Maximum number of 'NodeInfo's stored in a bucket. Most clients
-- use this value.
defaultBucketSize :: BucketSize
defaultBucketSize = 8

-- | A minimal FIFO interface parameterized over the effect monad, the
-- element type and the concrete queue representation.
data QueueMethods m elem fifo = QueueMethods
  { pushBack   :: elem -> fifo -> m fifo
  , popFront   :: fifo -> m (Maybe elem, fifo)
  , emptyQueue :: m fifo
  }

{-
fromQ :: Functor m =>
         ( a -> b ) -> ( b -> a )
      -> QueueMethods m elem a
      -> QueueMethods m elem b
fromQ embed project QueueMethods{..} =
  QueueMethods { pushBack   = \e -> fmap embed . pushBack e . project
               , popFront   = fmap (second embed) . popFront . project
               , emptyQueue = fmap embed emptyQueue
               }
-}

-- | 'QueueMethods' backed by 'Data.Sequence' in the 'Identity' monad.
seqQ :: QueueMethods Identity (NodeInfo ip) (Seq.Seq (NodeInfo ip))
seqQ = QueueMethods
  { pushBack   = \ x q -> pure (q Seq.|> x)
  , popFront   = \ q -> case Seq.viewl q of
      x Seq.:< rest -> pure (Just x, rest)
      Seq.EmptyL    -> pure (Nothing, Seq.empty)
  , emptyQueue = pure Seq.empty
  }

-- | The concrete queue type used inside every bucket.
type BucketQueue ip = Seq.Seq (NodeInfo ip)

bucketQ :: QueueMethods Identity (NodeInfo ip) (BucketQueue ip)
bucketQ = seqQ

-- | Bucket is also limited in its length — thus it's called k-bucket.
--   When bucket becomes full, we should split it in two lists by
--   current span bit. Span bit is defined by depth in the routing
--   table tree. Size of the bucket should be chosen such that it's
--   very unlikely that all nodes in bucket fail within an hour of
-- each other.
--
data Bucket ip = Bucket { bktNodes :: PSQ (NodeInfo ip) Timestamp
                          -- ^ known nodes, prioritized by last-heard-from time
                        , bktQ :: BucketQueue ip
                          -- ^ FIFO of candidates waiting for a slot to open
                        } deriving (Show,Generic)

instance (Eq ip, Serialize ip) => Serialize (Bucket ip)

-- Serialized via the association list; ordering is rebuilt on 'get'.
instance (Serialize k, Serialize v, Ord k, Ord v) => Serialize (PSQ k v) where
  get = PSQ.fromList <$> get
  put = put . PSQ.toList

-- | Update interval, in seconds.
delta :: NominalDiffTime
delta = 15 * 60

-- | Should maintain a set of stable long running nodes.
--
-- Note: pings are triggerd only when a bucket is full.
insertBucket :: (Eq ip, Alternative f) => Timestamp -> Event ip -> Bucket ip
             -> f ([CheckPing ip], Bucket ip)
insertBucket curTime (TryInsert info) bucket
  -- just update timestamp if a node is already in bucket
  | already_have
  = pure ( [], map_ns $ PSQ.insertWith max info curTime )

  -- bucket is good, but not full => we can insert a new node
  | PSQ.size (bktNodes bucket) < defaultBucketSize
  = pure ( [], map_ns $ PSQ.insert info curTime )

  -- If there are any questionable nodes in the bucket have not been
  -- seen in the last 15 minutes, the least recently seen node is
  -- pinged. If any nodes in the bucket are known to have become bad,
  -- then one is replaced by the new node in the next insertBucket
  -- iteration.
  | not (L.null stales)
  = pure ( [CheckPing stales], map_q $ pushBack bucketQ info )

  -- When the bucket is full of good nodes, the new node is simply
  -- discarded.  We must return 'A.empty' here to ensure that bucket
  -- splitting happens inside 'modifyBucket'.
  | otherwise = A.empty
 where
  -- nodes not heard from for at least 'delta' seconds
  stales = map key $ PSQ.atMost (curTime - delta) $ bktNodes bucket

  -- NOTE(review): was @maybe False (const True) $ PSQ.lookup …@;
  -- 'isJust' (Data.Maybe, already imported) says the same thing directly.
  already_have = isJust $ PSQ.lookup info (bktNodes bucket)

  -- lift an update over the node queue / candidate FIFO respectively
  map_ns f = bucket { bktNodes = f (bktNodes bucket) }
  map_q  f = bucket { bktQ = runIdentity $ f (bktQ bucket) }

insertBucket curTime (PingResult bad_node got_response) bucket
  = pure ([], Bucket (upd $ bktNodes bucket) popped)
 where
  (top, popped) = runIdentity $ popFront bucketQ (bktQ bucket)
  upd | got_response     = id      -- node answered: keep it as-is
      -- no answer: evict it and promote the oldest queued candidate
      | Just info <- top = PSQ.insert info curTime . PSQ.delete bad_node
      | otherwise        = id      -- nothing queued to promote

-- | Bit index into a 'NodeId' (interpreted by 'testIdBit').
type BitIx = Word

-- | Effectful analogue of 'Data.List.partition' over an abstract
-- queue: elements satisfying the predicate end up in the first queue,
-- the rest in the second, preserving relative order.
partitionQ :: Monad f => QueueMethods f elem b -> (elem -> Bool) -> b -> f (b, b)
partitionQ imp test q0 = do
  pass0 <- emptyQueue imp
  fail0 <- emptyQueue imp
  let flipfix a b f = fix f a b
  flipfix q0 (pass0,fail0) $ \rec q qs -> do
    (mb,q') <- popFront imp q
    case mb of
      Nothing -> return qs
      Just e  -> do qs' <- select (pushBack imp e) qs
                    rec q' qs'
        where
          select :: Functor f => (b -> f b) -> (b, b) -> f (b, b)
          select f = if test e then \(a,b) -> flip (,) b <$> f a
                               else \(a,b) -> (,) a <$> f b

-- | Split a bucket on bit @i@: the first result bucket holds the
-- entries whose id has bit @i@ set, the second the rest.  The
-- candidate FIFO is partitioned the same way.
split :: Eq ip => BitIx -> Bucket ip -> (Bucket ip, Bucket ip)
split i b = (Bucket ns qs, Bucket ms rs)
  where
    (ns,ms) = (PSQ.fromList *** PSQ.fromList)
            . partition (spanBit . key)
            . PSQ.toList $ bktNodes b
    (qs,rs) = runIdentity $ partitionQ bucketQ spanBit $ bktQ b
    spanBit entry = testIdBit (nodeId entry) i

{-----------------------------------------------------------------------
--  Table
-----------------------------------------------------------------------}

-- | Number of buckets in a routing table.
type BucketCount = Int

defaultBucketCount :: BucketCount
defaultBucketCount = 20

-- | Snapshot of this node's routing state: table plus own id/address.
data Info ip = Info
  { myBuckets :: Table ip
  , myNodeId  :: NodeId
  , myAddress :: ip
  } deriving (Eq, Show, Generic)

instance (Eq ip, Serialize ip) => Serialize (Info ip)

-- | The routing table covers the entire 'NodeId' space from 0 to 2 ^
-- 160.
-- The routing table is subdivided into 'Bucket's that each cover
-- a portion of the space. An empty table has one bucket with an ID
-- space range of @min = 0, max = 2 ^ 160@. When a node with ID \"N\"
-- is inserted into the table, it is placed within the bucket that has
-- @min <= N < max@. An empty table has only one bucket so any node
-- must fit within it. Each bucket can only hold 'K' nodes, currently
-- eight, before becoming 'Full'. When a bucket is full of known good
-- nodes, no more nodes may be added unless our own 'NodeId' falls
-- within the range of the 'Bucket'. In that case, the bucket is
-- replaced by two new buckets each with half the range of the old
-- bucket and the nodes from the old bucket are distributed among the
-- two new ones. For a new table with only one bucket, the full bucket
-- is always split into two new buckets covering the ranges @0..2 ^
-- 159@ and @2 ^ 159..2 ^ 160@.
--
data Table ip
    -- most nearest bucket
  = Tip  NodeId BucketCount (Bucket ip)
    -- left biased tree branch
  | Zero (Table ip) (Bucket ip)
    -- right biased tree branch
  | One  (Bucket ip) (Table ip)
    deriving (Show, Generic)

-- | Tables are equal when their per-bucket contents agree.
instance Eq ip => Eq (Table ip) where
  t1 == t2 =
    Network.BitTorrent.DHT.Routing.toList t1
      == Network.BitTorrent.DHT.Routing.toList t2

-- Stored as whole seconds in a 32-bit big-endian word.
instance Serialize NominalDiffTime where
  put = putWord32be . fromIntegral . fromEnum
  get = fmap (toEnum . fromIntegral) getWord32be

-- | Normally, routing table should be saved between invocations of
-- the client software. Note that you don't need to store /this/
-- 'NodeId' since it is already included in routing table.
instance (Eq ip, Serialize ip) => Serialize (Table ip)

-- | Shape of the table.
instance Pretty (Table ip) where
  pPrint t
    | nBuckets < 6 = hcat $ punctuate ", " $ L.map PP.int sizes
    | otherwise    = brackets $
        PP.int (L.sum sizes) <> " nodes, " <>
        PP.int nBuckets <> " buckets"
    where
      sizes    = shape t
      nBuckets = L.length sizes

-- | Empty table with specified /spine/ node id.
nullTable :: Eq ip => NodeId -> BucketCount -> Table ip
nullTable nid n =
    Tip nid (bucketCount (pred n))
        (Bucket PSQ.empty (runIdentity $ emptyQueue bucketQ))
  where
    -- clamp the requested depth into [0, 159] (a NodeId has 160 bits)
    bucketCount x = max 0 (min 159 x)

-- | Test if table is empty. In this case DHT should start
-- bootstrapping process until table becomes 'full'.
null :: Table ip -> Bool
null (Tip _ _ b) = PSQ.null $ bktNodes b
null _           = False

-- | Test if table have maximum number of nodes. No more nodes can be
-- 'insert'ed, except old ones becomes bad.
full :: Table ip -> Bool
full (Tip  _ n _) = n == 0
full (Zero   t b) = PSQ.size (bktNodes b) == defaultBucketSize && full t
full (One    b t) = PSQ.size (bktNodes b) == defaultBucketSize && full t

-- | Get the /spine/ node id.
thisId :: Table ip -> NodeId
thisId (Tip  nid _ _)   = nid
thisId (Zero table _)   = thisId table
thisId (One _    table) = thisId table

-- | Number of nodes in a bucket or a table.
type NodeCount = Int

-- | Internally, routing table is similar to list of buckets or a
-- /matrix/ of nodes. This function returns the shape of the matrix.
shape :: Table ip -> [BucketSize]
shape = map (PSQ.size . bktNodes) . toBucketList

-- | Get number of nodes in the table.
size :: Table ip -> NodeCount
size = L.sum . shape

-- | Get number of buckets in the table.
depth :: Table ip -> BucketCount
depth = L.length . shape

-- | Walk the tree following the bits of @nid@ and return the bucket
-- this id belongs to.  Total: the 'Tip' case always yields a bucket,
-- so the result is never actually 'Nothing'.
lookupBucket :: NodeId -> Table ip -> Maybe (Bucket ip)
lookupBucket nid = go 0
  where
    go i (Zero table bucket)
      | testIdBit nid i  = pure bucket
      |     otherwise    = go (succ i) table
    go i (One  bucket table)
      | testIdBit nid i  = go (succ i) table
      |     otherwise    = pure bucket
    go _ (Tip _ _ bucket) = pure bucket

-- | Generate a random 'NodeId' that would land in one of this table's
-- deepest buckets.  NOTE(review): relies on 'genBucketSample' /
-- 'bucketRange' from Network.BitTorrent.Address — semantics not
-- visible here, confirm against that module.
compatibleNodeId :: Table ip -> IO NodeId
compatibleNodeId tbl = genBucketSample prefix br
  where
    br = bucketRange (L.length (shape tbl) - 1) True
    -- pad the table's spanned bit prefix out to a full id
    bs = BS.pack $ take nodeIdSize $ tablePrefix tbl ++ repeat 0
    prefix = NodeId bs

-- | The bit prefix shared by this table's tree path, packed into
-- bytes (most significant bit first within each byte).
tablePrefix :: Table ip -> [Word8]
tablePrefix = map (packByte . take 8 . (++repeat False))
            . chunksOf 8
            . tableBits
  where
    packByte = foldl1' (.|.) . zipWith bitmask [7,6 .. 0]
    bitmask ix True = bit ix
    bitmask _  _    = 0

-- | The sequence of branch choices from the root to the 'Tip':
-- 'One' contributes 'True', 'Zero' contributes 'False'.
tableBits :: Table ip -> [Bool]
tableBits (One  _ tbl) = True  : tableBits tbl
tableBits (Zero tbl _) = False : tableBits tbl
tableBits (Tip _ _ _)  = []

-- | Split a list into chunks of @i@ elements each; the final chunk
-- may be shorter.  (Local copy of Data.List.Split's @chunksOf@.)
chunksOf :: Int -> [e] -> [[e]]
chunksOf i ls = map (take i) (build (splitter ls))
  where
    splitter :: [e] -> ([e] -> a -> a) -> a -> a
    splitter [] _ n = n
    splitter l c n  = l `c` splitter (drop i l) c n

    build :: ((a -> [a] -> [a]) -> [a] -> [a]) -> [a]
    build g = g (:) []

-- | Count of closest nodes in find_node request.
type K = Int

-- | Default 'K' is equal to 'defaultBucketSize'.
defaultK :: K
defaultK = 8

-- | Anything that can stand in for a 'NodeId' when searching the table.
class TableKey k where
  toNodeId :: k -> NodeId

instance TableKey NodeId where
  toNodeId = id

instance TableKey InfoHash where
  -- reinterpret the 160-bit infohash bytes as a node id
  toNodeId = either (error msg) id . S.decode . S.encode
    where   -- TODO unsafe coerse?
      msg = "tableKey: impossible"

-- | Get a list of /K/ closest nodes using XOR metric. Used in
-- 'find_node' and 'get_peers' queries.
kclosest :: Eq ip => TableKey a => K -> a -> Table ip -> [NodeInfo ip]
kclosest k (toNodeId -> nid) = L.take k . rank nodeId nid
                             . L.map PSQ.key
                             . PSQ.toList
                             . fromMaybe PSQ.empty
                             . fmap bktNodes
                             . lookupBucket nid

{-----------------------------------------------------------------------
--  Routing
-----------------------------------------------------------------------}

-- | Split a full 'Tip' on bit @i@; our own id stays in the (deeper)
-- new 'Tip' on the side selected by its bit @i@.
splitTip :: Eq ip => NodeId -> BucketCount -> BitIx -> Bucket ip -> Table ip
splitTip nid n i bucket
  | testIdBit nid i = (One  zeros (Tip nid (pred n) ones))
  |    otherwise    = (Zero (Tip nid (pred n) zeros) ones)
  where
    (ones, zeros) = split i bucket

-- | Used in each query.
--
-- TODO: Kademlia non-empty subtrees should should split if they have less than
-- k nodes in them. Which subtrees I mean is illustrated in Fig 1. of Kademlia
-- paper. The rule requiring additional splits is in section 2.4.
modifyBucket
  :: forall f ip xs. (Alternative f, Eq ip) =>
     NodeId -> (Bucket ip -> f (xs, Bucket ip)) -> Table ip -> f (xs,Table ip)
modifyBucket nodeId f = go (0 :: BitIx)
  where
    go :: BitIx -> Table ip -> f (xs, Table ip)
    go i (Zero table bucket)
      | testIdBit nodeId i = second (Zero table)    <$> f bucket
      |      otherwise     = second (`Zero` bucket) <$> go (succ i) table
    go i (One  bucket table )
      | testIdBit nodeId i = second (One  bucket)   <$> go (succ i) table
      |      otherwise     = second (`One` table)   <$> f bucket
    go i (Tip nid n bucket)
      -- at the depth limit (n == 0) no further split is possible
      | n == 0             = second (Tip nid n)     <$> f bucket
      -- if f rejects (e.g. a full bucket), backtrack via '<|>' and
      -- retry after splitting the tip
      |      otherwise     = second (Tip nid n)     <$> f bucket
                         <|> go i (splitTip nid n i bucket)

-- | Triggering event for atomic table update
data Event ip = TryInsert  { foreignNode :: NodeInfo ip }
                -- ^ a node was heard from and may be added
              | PingResult { foreignNode :: NodeInfo ip
                           , ponged      :: Bool }
                -- ^ outcome of a 'CheckPing' request
  deriving (Eq,Ord,Show)

-- | The id of the node an event refers to.
eventId :: Event ip -> NodeId
eventId (TryInsert  NodeInfo{..})   = nodeId
eventId (PingResult NodeInfo{..} _) = nodeId

-- | Actions requested by atomic table update
data CheckPing ip = CheckPing [NodeInfo ip]
  deriving (Eq,Ord,Show)

-- | Atomic 'Table' update
insert :: (Alternative m, Eq ip) => Timestamp -> Event ip -> Table ip -> m ([CheckPing ip], Table ip)
insert tm event = modifyBucket (eventId event) (insertBucket tm event)

{-----------------------------------------------------------------------
--  Conversion
-----------------------------------------------------------------------}

type TableEntry ip = (NodeInfo ip, Timestamp)

-- | Unpack a PSQ binding into a plain pair.
tableEntry :: NodeEntry ip -> TableEntry ip
tableEntry (a :-> b) = (a, b)

-- | Non-empty list of buckets.
toBucketList :: Table ip -> [Bucket ip]
toBucketList (Tip _ _ b) = [b]
toBucketList (Zero t  b) = b : toBucketList t
toBucketList (One  b  t) = b : toBucketList t

-- | All table entries, grouped per bucket (deepest bucket last).
toList :: Eq ip => Table ip -> [[TableEntry ip]]
toList = L.map (L.map tableEntry . PSQ.toList . bktNodes) . toBucketList