[or-cvs] r20929: {} Check in code for doing hill-climbing optimization of node-selection probabilities (projects/performance/node-selection)

sjm217 at seul.org
Mon Nov 9 11:19:48 UTC 2009


Author: sjm217
Date: 2009-11-09 06:19:47 -0500 (Mon, 09 Nov 2009)
New Revision: 20929

Added:
   projects/performance/node-selection/hillclimbing.py
Log:
Check in code for doing hill-climbing optimization of node-selection probabilities (from r6583 of local repository)

Added: projects/performance/node-selection/hillclimbing.py
===================================================================
--- projects/performance/node-selection/hillclimbing.py	                        (rev 0)
+++ projects/performance/node-selection/hillclimbing.py	2009-11-09 11:19:47 UTC (rev 20929)
@@ -0,0 +1,201 @@
+#!/usr/bin/python
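+## Hill-climbing search for node-selection probabilities that minimise the
+## network's total expected waiting time. Usage:
+##
+##     python hillclimbing.py <bandwidth-file>
+##
+## where each input line holds two numbers, a node's bandwidth and its
+## current usage; both are divided by 512 below, presumably converting
+## bytes/s into 512-byte Tor cells/s.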
+
+import sys
+
+if __name__=="__main__":
+    import matplotlib
+    matplotlib.use('TkAgg') # must select the backend before importing pyplot
+
+import matplotlib.pyplot as plt
+from numpy import *   # also brings in numpy's `random` module, used below
+
+import time
+
+import threading
+
+## Number of candidate moves to sample before hill-climbing gives up on the
+## current step size
+ILIMIT = 1000
+
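+## Expected time a cell spends at a node: the service time x (seconds per
+## cell) plus queueing delay. With utilization rho = p*L*x (selection
+## probability p times total load L times service time x), the returned
+## value x + rho*x/(2*(1-rho)) matches the mean sojourn time of an M/D/1
+## queue, which appears to be the model used here.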
+def wait(x, p, L, isbroken):
+    ## isbroken is unused; the signature matches the commented-out
+    ## per-node loop in calc_waittime() below
+    rho = p*L*x                # utilization of the node
+    return x + rho*x/(2.0*(1.0-rho))
+
+def calc_waittime(prob, xs, totalusage, debug=False):
+    #print "info", prob[0], nodebw[0], totalusage
+    ## Check that probabilities really add up to 1
+    assert abs(1.0 - prob.sum()) < 1e-6
+
+    ## Calculate processing time of a node
+    #xs = [1.0/t for t in nodebw]
+    #xs = 1.0/nodebw
+
+    ## Find overloaded ("broken") nodes: a utilization outside [0, 1) makes
+    ## the waiting-time formula meaningless, and a utilization of exactly
+    ## 1.0 would divide by zero in wait(), so treat that as broken too
+    loading_factor = xs*prob*totalusage
+    broken = (loading_factor < 0.0) | (loading_factor >= 1.0)
+    ## Number of broken nodes
+    #print "Broken", len([x for x in broken if x])
+
+    ## Calculate weighted waiting time
+    #wtime = [wait(x, p, totalusage, isbroken)
+    #          for x, p, isbroken in zip(xs, prob, broken)]
+    wtime = wait(xs, prob, totalusage, False)
+    ## Blank out broken nodes so they cannot win the max() below
+    wtime[broken] = -1.0
+    #print wtime[0]
+
+    ## Get the maximum waiting time over the non-broken nodes
+    cap = wtime.max()
+    #if debug:
+    #    print cap
+    #print wtime
+    #print "Cap", cap
+
+    ## Cap the weighted waiting time: broken nodes are charged the worst
+    ## waiting time seen on any working node
+    wtime[broken] = cap
+    wwtime = wtime * prob
+
+    #wwtime = [wwait_cap(w, p, cap) for w, p in zip(wtime, prob)]
+    
+    return wwtime
+
+def test(fn = None):
+    if fn is None:
+        fn = sys.argv[1]
+    fh = open(fn, "rt")
+
+    ## Load in node bandwidths and total network usage
+    totalusage = float64(0.0)   # numpy scalar, so save() can .dump() it
+    totalbw = 0.0
+    nodebw = []
+    for line in fh:
+        bw, usage = line.split()
+        bw = float(bw)/512.0    # presumably bytes/s -> 512-byte cells/s
+        usage = float(usage)/512.0
+        ## Ignore nodes with bandwidth outside the acceptable range:
+        ## non-positive, or at least 10 MB/s
+        if bw <= 0.0 or bw >= (10.0e6/512.0):
+            continue
+        totalusage += usage
+        totalbw += bw
+        nodebw.append(bw)
+
+    #print totalusage, totalbw
+
+    ## Two baselines: uniform selection, and bandwidth-weighted selection
+    ## (the latter modelling Tor's behaviour)
+    pu = array([1.0/len(nodebw)] * len(nodebw))
+    pt = array([bw / totalbw for bw in nodebw])
+
+    anodebw = array(nodebw)
+    xs = 1.0/anodebw            # per-node service time (seconds per cell)
+    x = calc_waittime(pt, xs, totalusage, True)
+    y = calc_waittime(pu, xs, totalusage, True)
+
+    #for i in range(10000):
+    #    _ = calc_waittime(pt, xs, totalusage)
+        
+    #print x
+    print "E(Tor)", sum(x)
+    #print y
+    print "E(Uniform)", sum(y)
+
+    return totalusage, anodebw, x, y
+
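+## One round of randomized hill-climbing: starting from sprob (whose score,
+## the total weighted waiting time, is ss), repeatedly move `amount` of
+## probability mass from one random node to another and accept the first
+## candidate that improves the score. Gives up after ILIMIT unsuccessful
+## samples and returns the starting point unchanged.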
+def optimize(sprob, ss, xs, totalusage, amount):
+    l = len(sprob)
+    i = 0
+    while True:
+        i += 1
+        ## Take copy of base
+        prob = sprob.copy()
+        
+        ## Pick the two nodes to change; numpy's randint excludes the upper
+        ## bound, so use l (not l-1) or the last node could never be chosen
+        a = random.randint(0, l)
+        b = random.randint(0, l)
+
+        ## Find the amount to move: never take more mass than node a has
+        if prob[a] < amount:
+            damount = prob[a]
+        else:
+            damount = amount
+
+        ## Move between two elements
+        prob[a] -= damount
+        prob[b] += damount
+
+        ## Calculate the function to optimize
+        wwait = calc_waittime(prob, xs, totalusage, True)
+        s = wwait.sum()
+
+        if s < ss:
+            return i, prob, wwait, s
+        if i > ILIMIT:
+            ## Out of patience at this step size; return the starting point
+            wwait = calc_waittime(sprob, xs, totalusage, True)
+            return i, sprob, wwait, wwait.sum()
+
+def save(anodebw, prob, xs, totalusage, count):
+    ## Checkpoint the current state as consecutive numpy pickles; the file
+    ## must be opened in binary mode, since ndarray.dump() writes pickle data
+    fh = open("nodeprob-%06d"%count, "wb")
+    anodebw.dump(fh)
+    prob.dump(fh)
+    xs.dump(fh)
+    totalusage.dump(fh)
+    fh.close()
+    #fh.write("bw prob\n")
+    #for i in range(len(anodebw)):
+    #    fh.write("%f %f\n"%(anodebw[i],prob[i]))
+    #fh.close()
+
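+## Runs the optimization on a background thread, redrawing the cumulative
+## waiting-time curve after every accepted move. Note that calling
+## canvas.draw() from a worker thread is not guaranteed to be thread-safe
+## in matplotlib; this relies on the TkAgg backend selected above.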
+class Animator(threading.Thread):
+    def __init__(self, prob, totalusage, anodebw, fig, ax):
+        threading.Thread.__init__(self)
+        self.prob = prob
+        self.totalusage = totalusage
+        self.anodebw = anodebw
+        self.xs = 1.0/anodebw
+
+        self.fig = fig
+        self.ax = ax
+    
+    def run(self):
+        wwait = calc_waittime(self.prob, self.xs, self.totalusage)
+        s = wwait.sum()
+        #line, = self.ax.plot(self.anodebw, wwait.cumsum())
+        line, = self.ax.plot(wwait.cumsum())
+        amount = 1.0/2          # initial amount of probability mass to move
+
+        i = 0
+        while True:
+            i += 1
+            cnt, self.prob, wwait, s = optimize(self.prob, s, self.xs, self.totalusage, amount)
+            print i, cnt, s
+            if cnt > ILIMIT:
+                ## No improving move found at this step size: halve it and
+                ## checkpoint the current best probabilities
+                amount /= 2.0
+                print "Narrowing... to", amount
+                save(self.anodebw, self.prob, self.xs, self.totalusage, i)
+                
+            line.set_ydata(cumsum(wwait))
+            self.fig.canvas.draw()
+            #time.sleep(1)
+
+def main():
+    totalusage, anodebw, x, y = test()
+    
+    pu = array([1.0/len(anodebw)] * len(anodebw))
+    
+    plt.ioff()
+    fig = plt.figure()
+    ax = fig.add_subplot(111)
+
+    anim = Animator(pu, totalusage, anodebw, fig, ax)
+
+    ## Start the optimizer thread once the Tk event loop is running
+    win = fig.canvas.manager.window
+    win.after(100, anim.start)
+
+    plt.show()
+
+if __name__=="__main__":
+    main()


