author     Sam T <pxqr.sta@gmail.com>   2013-07-10 03:54:18 +0400
committer  Sam T <pxqr.sta@gmail.com>   2013-07-10 03:54:18 +0400
commit     b57d508bba214ba59d7af609d4cac8a57d56dfeb (patch)
tree       bc3a1d17d1e4d4b055a9895d838ea462deb2ee22 /src/Network
parent     ebdee3617ce72223419b2f16098498b19c15513b (diff)
+ Comment connect peers set.
Diffstat (limited to 'src/Network')
-rw-r--r--   src/Network/BitTorrent/Internal.lhs   79
1 file changed, 60 insertions(+), 19 deletions(-)
diff --git a/src/Network/BitTorrent/Internal.lhs b/src/Network/BitTorrent/Internal.lhs
index 8dbf488e..c0562223 100644
--- a/src/Network/BitTorrent/Internal.lhs
+++ b/src/Network/BitTorrent/Internal.lhs
@@ -317,7 +317,7 @@ fresh required extensions.
 * The number of /swarms/ to join, each swarm described by the
 'SwarmSession'.
 
-Normally, you would have one client session, however, if we need, in
+Normally, you would have one client session, however, if we needed, in
 one application we could have many clients with different peer ID's
 and different enabled extensions at the same time.
 
@@ -461,28 +461,60 @@ However if client is a seeder then the value depends on .
 > data SwarmSession = SwarmSession {
 >     torrentMeta :: !Torrent
 
->   -- |
 >   , clientSession :: !ClientSession
 
->   -- | Represent count of peers we _currently_ can connect to in the
->   -- swarm. Used to bound number of concurrent threads.
+TODO: lower "vacantPeers" when the client becomes a seeder, according
+to the throttling policy.
+
+Represents the count of peers we can _currently_ connect to in the
+swarm. Used to bound the number of concurrent threads. See also the
+*Thread Throttling* section.
+
 >   , vacantPeers :: !(MSem SessionCount)
 
->   -- | Modify this carefully updating global progress.
+The client bitfield is used to keep track of the "client have" piece
+set. Modify this carefully, always updating the global progress.
+
 >   , clientBitfield :: !(TVar Bitfield)
 
+We keep a set of all connected peers for each particular torrent to
+prevent duplicated, and therefore redundant, TCP connections. For
+example, consider the following very simple and realistic scenario:
+
+ * Peer A looks up the tracker for peers.
+
+ * Peer B looks up the tracker for peers.
+
+ * Finally, peer A connects to B and peer B connects to A
+   simultaneously.
+
+The problem may also occur in other situations: duplicates in
+successive tracker responses, or in tracker and DHT results.
+
+So without any protection we would end up with two sessions between
+the same peers. That's bad, because it could lead to:
+
+ * Reduced throughput - multiple sessions between the same peers
+   multiply the control overhead (control messages, session state).
+
+ * Thread occupation - duplicated sessions eat thread slots and
+   discourage other, possibly more useful, peers from establishing a
+   connection.
+
+To avoid this we can check, in a single transaction, whether a peer is
+already connected, and add a connection only if it is not; see the
+sketch after this hunk.
+
 >   , connectedPeers :: !(TVar (Set PeerSession))
 
->   -- TODO use bounded broadcast chan with priority queue and drop old entries
->   -- | Channel used for replicate messages across all peers in
->   -- swarm. For exsample if we get some piece we should sent to all
->   -- connected (and interested in) peers HAVE message.
->   --
+TODO: use a bounded broadcast chan with a priority queue and drop old
+entries.
+
+Channel used to replicate messages across all peers in the swarm. For
+example, if we get some piece we should send a HAVE message to all
+connected (and interested) peers.
+
 >   , broadcastMessages :: !(TChan Message)
 >   }
 
-> -- INVARIANT:
-> --   max_sessions_count - sizeof connectedPeers = value vacantPeers
+INVARIANT: max_sessions_count - sizeof connectedPeers = value vacantPeers
 
 > instance Eq SwarmSession where
 >   (==) = (==) `on` (tInfoHash . torrentMeta)
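
A minimal sketch (editor's illustration, not part of the commit) of
that single-transaction check. The helper tryRegisterPeer is
hypothetical; it relies on the Ord instance of PeerSession shown later
in this diff, which compares by connectedPeerAddr, so S.member finds
an existing session with the same address:

    import qualified Data.Set as S
    import Control.Concurrent.STM

    -- Atomically register a peer, refusing duplicates (hypothetical;
    -- assumes RecordWildCards as used elsewhere in the module).
    tryRegisterPeer :: SwarmSession -> PeerSession -> STM Bool
    tryRegisterPeer SwarmSession {..} ps = do
      peers <- readTVar connectedPeers
      if ps `S.member` peers
        then return False                       -- duplicate: reject
        else do writeTVar connectedPeers (S.insert ps peers)
                return True

A caller would run this inside atomically and close the freshly opened
socket whenever it returns False.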
@@ -523,6 +555,10 @@ However if client is a seeder then the value depends on .
 > getClientBitfield :: SwarmSession -> IO Bitfield
 > getClientBitfield = readTVarIO . clientBitfield
 
+> pieceLength :: SwarmSession -> Int
+> pieceLength = ciPieceLength . tInfo . torrentMeta
+> {-# INLINE pieceLength #-}
+
 > {-
 > haveDone :: MonadIO m => PieceIx -> SwarmSession -> m ()
 > haveDone ix =
@@ -532,7 +568,8 @@ However if client is a seeder then the value depends on .
 >     currentProgress
 > -}
 
-> -- acquire/release mechanism: for internal use only
+Peer sessions throttling
+------------------------------------------------------------------------
 
 > enterSwarm :: SwarmSession -> IO ()
 > enterSwarm SwarmSession {..} = do
@@ -549,10 +586,6 @@ However if client is a seeder then the value depends on .
 >   bracket (enterSwarm se) (const (leaveSwarm se))
 >     . const
 
-> pieceLength :: SwarmSession -> Int
-> pieceLength = ciPieceLength . tInfo . torrentMeta
-> {-# INLINE pieceLength #-}
-
 Peer sessions
 ------------------------------------------------------------------------
 
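The hunk above cuts off the body of enterSwarm. Under the INVARIANT
stated earlier, a minimal pair could be little more than wait/signal
on the vacantPeers semaphore (an assumed sketch; the real bodies
likely do more, and Control.Concurrent.MSem is the SafeSemaphore
module the field type already implies):

    import qualified Control.Concurrent.MSem as MSem

    -- Block while the swarm already has max_sessions_count live
    -- sessions; free a slot on exit.  Together with the connectedPeers
    -- updates this preserves
    --   max_sessions_count - sizeof connectedPeers = value vacantPeers
    enterSwarm :: SwarmSession -> IO ()
    enterSwarm SwarmSession {..} = MSem.wait vacantPeers

    leaveSwarm :: SwarmSession -> IO ()
    leaveSwarm SwarmSession {..} = MSem.signal vacantPeers
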
@@ -611,6 +644,12 @@ avoid redundant KA messages.
 > instance Ord PeerSession where
 >   compare = comparing connectedPeerAddr
 
+> findPieceCount :: PeerSession -> PieceCount
+> findPieceCount = pieceCount . tInfo . torrentMeta . swarmSession
+
+Peer session exceptions
+------------------------------------------------------------------------
+
 > -- | Exceptions used to interrupt the current P2P session. These
 > -- exceptions will NOT affect other P2P sessions, DHT, peer <->
 > -- tracker, or any other session.
@@ -631,6 +670,9 @@ avoid redundant KA messages.
 > putSessionException :: SessionException -> IO ()
 > putSessionException = print
 
+Peer session creation
+------------------------------------------------------------------------
+
 > -- TODO modify such that we can use this in the listener loop
 > -- TODO check if the peer is already connected
 > withPeerSession :: SwarmSession -> PeerAddr
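
As an illustration (assumed, not in the commit) of how a
SessionException stays contained to one session: a supervisor can wrap
each peer loop with handle and report via putSessionException. Here
peerLoop is a hypothetical placeholder, and SessionException is
assumed to have an Exception instance:

    import Control.Exception (handle)

    -- A SessionException kills only this peer's loop; sibling
    -- sessions, the tracker loop and the DHT keep running.
    guardedSession :: IO () -> IO ()
    guardedSession peerLoop = handle onDie peerLoop
      where
        onDie :: SessionException -> IO ()
        onDie = putSessionException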
@@ -672,8 +714,7 @@ avoid redundant KA messages.
 >     atomically $ modifyTVar' connectedPeers (S.delete ps)
 >     close sock
 
-> findPieceCount :: PeerSession -> PieceCount
-> findPieceCount = pieceCount . tInfo . torrentMeta . swarmSession
+TODO: initiatePeerSession, acceptPeerSession
 
 Broadcasting: Have, Cancel, Bitfield, SuggestPiece
 ------------------------------------------------------------------------
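
A sketch of the pattern this section presumably implements (the Have
constructor and both helper names are assumptions): completing a piece
writes once to broadcastMessages, and every peer session reads from
its own duplicate of the channel:

    import Control.Concurrent.STM

    -- Publish a HAVE for a freshly verified piece to all sessions.
    broadcastHave :: SwarmSession -> PieceIx -> IO ()
    broadcastHave SwarmSession {..} ix =
      atomically $ writeTChan broadcastMessages (Have ix)

    -- Each peer session thread reads from its own duplicate of the
    -- channel, so it sees every message written after it subscribed.
    subscribeBroadcast :: SwarmSession -> IO (TChan Message)
    subscribeBroadcast SwarmSession {..} =
      atomically $ dupTChan broadcastMessages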