[Assorted-commits] SF.net SVN: assorted:[1147] ydb/trunk
From: <yan...@us...> - 2009-01-27 23:25:29
Revision: 1147
          http://assorted.svn.sourceforge.net/assorted/?rev=1147&view=rev
Author:   yangzhang
Date:     2009-01-27 23:25:16 +0000 (Tue, 27 Jan 2009)

Log Message:
-----------
- added --exit-on-seqno
- refactored analysis.py, settled on the sloppy regex extraction approach
- brought back scaling analysis
- improved the colors and shapes
- added pretty-printing of raw data tables
- cleaned up range/hosts configuration in test.bash (incl. propagation to
  subscripts)
- added len-plotting to analysis
- fixed erratic behavior by lowering chkpt to 1K from 10K
- added recovery-generation timing
- fixed always-multirecover bug
- fixed set_yourhost omission for the joiner
- added somewhat-off analysis for multirecover

Modified Paths:
--------------
    ydb/trunk/README
    ydb/trunk/src/main.lzz.clamp
    ydb/trunk/tools/analysis.py
    ydb/trunk/tools/test.bash

Modified: ydb/trunk/README
===================================================================
--- ydb/trunk/README	2009-01-26 05:35:42 UTC (rev 1146)
+++ ydb/trunk/README	2009-01-27 23:25:16 UTC (rev 1147)
@@ -231,32 +231,45 @@
 Period: 1/20-1/27
 
 - DONE implement multihost
-- TODO add simple, proper timestamped logging
-- TODO see how much multihost recovery affects perf
-- TODO look again at how much yielding affects perf
-- TODO monitor memory usage
-- TODO switch to btree
-- TODO break down the red bar some more
-- TODO see how much time difference there is
-- TODO red bar: why are/aren't we saturating bandwidth?
-- TODO understand the rest of the perf (eg stl map)
-- TODO try scaling up
+  - not much, it only decreases the xfer time (which orig was thought to be
+    the bottleneck)
+- DONE see how much multihost recovery affects perf
+  - quite a bit!
+- DONE look again at how much yielding affects perf
+  - not much
+- DONE break down the red bar some more
+  - most of the time is spent in the dumping
+- DONE understand the rest of the perf (eg stl map)
+  - DONE why the big jump in 400,000 ops? why all the unexpected ups & downs?
+    - due to the 10,000-txn quantum; lowering this to 1,000 made everything
+      much saner
+  - DONE how does the recovery state xfer time compare to what's expected?
+    - msgs smaller than expected, eg 300,000 txns * 2*4 bytes per txn = 2.4MB,
+      but msgs are ~2MB (compression, some random overwrites)
+    - xfer takes much longer than the theoretical time; 2MB on 1GbE = 16 ms,
+      but it takes more like 50 ms
+- DONE start building infrastructure for disk IO
+
+Period: 1/27-
+
+- TODO fix up analysis of multihost recovery
 - TODO implement checkpointing disk-based scheme
 - TODO implement log-based recovery; show that it sucks
 - TODO implement group (batch) commit for log-based recovery
-- TODO talk
-  - motivation: log-based sucks, look into alternatives
+- TODO try scaling up
 - TODO serialize outputs from the various clients to a single merger to (1)
   have ordering over the (timestamped) messages, and (2) avoid interleaved
   lines
-
-Period: 1/27-
-
+- TODO add simple, proper timestamped logging
+- TODO see how much clock difference there is among the hosts
+- TODO monitor memory usage
+- TODO try improving map perf; switch to btree; try bulk loading
 - TODO detailed view of tps during recovery over time (should see various
   phases)
 - TODO later: runtime overhead of logging/tps under normal operation (scaled
   with # nodes?)
 - TODO later: timestamped logging?
+- TODO talk
+  - motivation: log-based sucks, look into alternatives
 
 Longer term
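As a sanity check on the arithmetic in the README notes above, the expected
state size and transfer time work out as follows (a quick Python 2 sketch
matching the repo's tooling; the 2*4-bytes-per-txn figure and the ~2 MB
observed message size are the README's own numbers, not new measurements):

    # Expected recovery-state size for 300,000 txns at 2*4 bytes per txn
    # (one 4-byte key plus one 4-byte value), per the README's assumption.
    txns = 300000
    expected_bytes = txns * 2 * 4
    print 'expected size: %.1f MB' % (expected_bytes / 1e6)             # 2.4 MB

    # Theoretical transfer time for the ~2 MB actually observed, on 1 GbE
    # (~125 MB/s of payload) -- versus the ~50 ms measured.
    observed_bytes = 2e6
    print 'theoretical xfer: %.0f ms' % (observed_bytes / 125e6 * 1000) # 16 ms

The gap between the 2.4 MB expectation and the ~2 MB observation is explained
in the notes (compression, some random overwrites); the 16 ms vs ~50 ms gap is
what the new xfer/deserialization timing below is meant to break down.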
Modified: ydb/trunk/src/main.lzz.clamp
===================================================================
--- ydb/trunk/src/main.lzz.clamp	2009-01-26 05:35:42 UTC (rev 1146)
+++ ydb/trunk/src/main.lzz.clamp	2009-01-27 23:25:16 UTC (rev 1147)
@@ -37,7 +37,8 @@
 // Configuration.
 st_utime_t timeout;
-int chkpt, accept_joiner_seqno, issuing_interval, min_ops, max_ops;
+int chkpt, accept_joiner_seqno, issuing_interval, min_ops, max_ops,
+    stop_on_seqno;
 size_t accept_joiner_size;
 bool verbose, yield_during_build_up, yield_during_catch_up, dump,
   show_updates, count_updates, stop_on_recovery, general_txns, profile_threads,
@@ -283,34 +284,47 @@
 /**
  * Read a message. This is done in two steps: first by reading the length
- * prefix, then by reading the actual body.
+ * prefix, then by reading the actual body. This function also provides a way
+ * to measure how much time is spent actually reading the message from the
+ * network. Such measurement only makes sense for large messages which take a
+ * long time to receive.
  *
  * \param[in] src The socket from which to read.
  *
 * \param[in] msg The protobuf to read into.
  *
- * \param[in] timed Whether to make a note of the time at which the first
- * piece of the message (the length) was received. Such measurement only makes
- * sense for large messages which take a long time to receive.
+ * \param[out] start_time If not null, record the time at which we start to
+ * receive the message (after the length is received).
  *
+ * \param[out] stop_time If not null, record the time at which we finish
+ * receiving the message (before we deserialize the protobuf).
+ *
 * \param[in] timeout Timeout on each of the two read operations (the first is
 * on the length, the second on the rest).
+ *
+ * \return The length of the serialized message in bytes.
  */
 template <typename T>
-long long
-readmsg(st_netfd_t src, T & msg, bool timed = false, st_utime_t timeout =
-        ST_UTIME_NO_TIMEOUT)
+size_t
+readmsg(st_netfd_t src, T & msg, long long *start_time = nullptr, long long
+        *stop_time = nullptr, st_utime_t timeout = ST_UTIME_NO_TIMEOUT)
 {
   // Read the message length.
   uint32_t len;
   checkeqnneg(st_read_fully(src, static_cast<void*>(&len), sizeof len,
                             timeout),
               static_cast<ssize_t>(sizeof len));
-  long long start_receive = timed ? current_time_millis() : -1;
+  if (start_time != nullptr)
+    *start_time = current_time_millis();
   len = ntohl(len);
 
 #define GETMSG(buf) \
   checkeqnneg(st_read_fully(src, buf, len, timeout), (int) len); \
+  if (stop_time != nullptr) \
+    *stop_time = current_time_millis(); \
   check(msg.ParseFromArray(buf, len));
 
   // Parse the message body.
@@ -323,7 +337,7 @@
     GETMSG(buf.get());
   }
 
-  return start_receive;
+  return len;
 }
 
 /**
@@ -336,7 +350,7 @@
 readmsg(st_netfd_t src, st_utime_t timeout = ST_UTIME_NO_TIMEOUT)
 {
   T msg;
-  readmsg(src, msg, false, timeout);
+  readmsg(src, msg, nullptr, nullptr, timeout);
   return msg;
 }
 
@@ -407,6 +421,11 @@
     if (txn.seqno() == accept_joiner_seqno) {
       accept_joiner.set();
     }
+
+    if (txn.seqno() == stop_on_seqno) {
+      cout << "stopping on issue of seqno " << txn.seqno() << endl;
+      stop_hub.set();
+    }
   }
 
   Txn txn;
 
@@ -600,14 +619,18 @@
     mii::const_iterator end = multirecover && mypos < nnodes - 1 ?
       map.lower_bound(interp(RAND_MAX, mypos + 1, nnodes)) : map.end();
     cout << "generating recovery over " << begin->first << ".."
"end" : lexical_cast<string>(end->first)) - << " (node " << mypos << " of " << nnodes << ")" - << endl; + << (end == map.end() ? "end" : lexical_cast<string>(end->first)); + if (multirecover) + cout << " (node " << mypos << " of " << nnodes << ")"; + cout << endl; + long long start_snap = current_time_millis(); foreach (const pii &p, make_iterator_range(begin, end)) { Recovery_Pair *pair = recovery->add_pair(); pair->set_key(p.first); pair->set_value(p.second); } + cout << "generating recovery took " + << current_time_millis() - start_snap << " ms" << endl; recovery->set_seqno(seqno); send_states.push(recovery); } @@ -799,7 +822,7 @@ // Construct the initialization message. Init init; init.set_txnseqno(0); - init.set_multirecover(true); + init.set_multirecover(multirecover); foreach (replica_info r, replicas) { SockAddr *psa = init.add_node(); psa->set_host(r.host()); @@ -841,14 +864,15 @@ accept_joiner.waitset(); } Join join = readmsg<Join>(joiner); + replicas.push_back(replica_info(joiner, static_cast<uint16_t>(join.port()))); cout << "setting seqno to " << seqno << endl; init.set_txnseqno(seqno); + init.set_yourhost(replicas.back().host()); sendmsg(joiner, init); recover_signals.push(current_time_millis()); // Start streaming txns to joiner. cout << "start streaming txns to joiner" << endl; - replicas.push_back(replica_info(joiner, static_cast<uint16_t>(join.port()))); newreps.push(replicas.back()); handlers.insert(my_spawn(bind(handle_responses, joiner, ref(seqno), rid++, ref(recover_signals), false), @@ -959,15 +983,18 @@ recovery_builders.push_back(my_spawn(lambda() { // Read the recovery message. Recovery recovery; - long long receive_start = -1; + long long receive_start = 0, receive_end = 0; + size_t len = 0; { st_intr intr(stop_hub); - receive_start = readmsg(__ref(replicas)[__ctx(i)], recovery, true); + len = readmsg(__ref(replicas)[__ctx(i)], recovery, &receive_start, + &receive_end); } long long build_start = current_time_millis(); - cout << "got recovery message in " - << build_start - __ref(before_recv) << " ms (xfer took " - << build_start - receive_start << " ms)" << endl; + cout << "got recovery message of " << len << " bytes in " + << build_start - __ref(before_recv) << " ms: xfer took " + << receive_end - receive_start << " ms, deserialization took " + << build_start - receive_end << " ms" << endl; for (int i = 0; i < recovery.pair_size(); i++) { const Recovery_Pair &p = recovery.pair(i); __ref(map)[p.key()] = p.value(); @@ -1116,6 +1143,8 @@ "run the leader (run replica by default)") ("exit-on-recovery,x", po::bool_switch(&stop_on_recovery), "exit after the joiner fully recovers (for leader only)") + ("exit-on-seqno,X", po::value<int>(&stop_on_seqno)->default_value(-1), + "exit after txn seqno is issued (for leader only)") ("accept-joiner-size,s", po::value<size_t>(&accept_joiner_size)->default_value(0), "accept recovering joiner (start recovery) after DB grows to this size " @@ -1139,7 +1168,7 @@ ("leader-port,P", po::value<uint16_t>(&leader_port)->default_value(7654), "port the leader listens on") - ("chkpt,c", po::value<int>(&chkpt)->default_value(10000), + ("chkpt,c", po::value<int>(&chkpt)->default_value(1000), "number of txns before yielding/verbose printing") ("timelim,T", po::value<long long>(&timelim)->default_value(0), "general network IO time limit in milliseconds, or 0 for none") Modified: ydb/trunk/tools/analysis.py =================================================================== --- ydb/trunk/tools/analysis.py 2009-01-26 05:35:42 UTC (rev 1146) +++ 
Modified: ydb/trunk/tools/analysis.py
===================================================================
--- ydb/trunk/tools/analysis.py	2009-01-26 05:35:42 UTC (rev 1146)
+++ ydb/trunk/tools/analysis.py	2009-01-27 23:25:16 UTC (rev 1147)
@@ -1,10 +1,12 @@
 #!/usr/bin/env python
 from __future__ import with_statement
-import re, sys, itertools
+import re, sys, itertools, colorsys
 from os.path import basename, realpath
 from pylab import *
 
+class struct(object): pass
+
 def getname(path):
   return basename(realpath(path))
 
 def check(path):
@@ -12,106 +14,137 @@
     if 'got timeout' in f.read():
       print 'warning: timeout occurred'
 
-def agg(src):
+def show_table(pairs):
+  def fmt(x):
+    s = str(x)
+    if s.endswith('.0'): return s[:-2]
+    p = s.find('.')
+    return s if p < 0 else s[:p+4]
+  cols = [ [heading] + map(fmt, col) for (heading, col) in pairs ]
+  widths = [ max(map(len, col)) for col in cols ]
+  return '\n'.join(
+      '|'.join( ('%%%ds' % width) % val for width, val in zip(widths, row) )
+      for row in zip(*cols) )
+
+def show_table1(dicts):
+  keys = dicts[0].keys()
+  return show_table([(k, [d[k] for d in dicts]) for k in keys])
+
+def logextract(path, indexkey, pats):
+  check(path)
+  # Capture values from the log using regex pats.
+  def getcaps():
+    with file(path) as f:
+      caps = {} # captures: name -> int/float
+      sats = [ False for pat in pats ]
+      for line in f:
+#        if line == '\n': print '===', caps.keys(), ''.join('1' if s else '0' for s in sats)
+        for i, pat in enumerate(pats):
+          m = re.search(pat, line)
+          if m:
+            for k in m.groupdict():
+              if k in caps:
+                caps[k + '0'] = caps[k]
+            caps.update((k, float(v)) for k,v in m.groupdict().iteritems())
+            sats[i] = True
+            break
+        if all(sats):
+          sats = [ False for pat in pats ]
+#          print '!!!'
+          yield caps.copy() # [ caps[k] for k in keys ]
+          caps.clear()
+  # Aggregate the captured values.
+  caps = list(getcaps())
+#  print show_table1(caps)
+  keys = [indexkey] + filter(lambda x: x != indexkey, caps[0].keys())
   def gen():
-    for index, tups in itertools.groupby(src, lambda x: x[0]):
-      yield list(tups)
-  a = array(list(gen()))
+    for index, ds in itertools.groupby(caps, lambda d: d[indexkey]):
+      ds = list(ds)
+      print [d['len'] for d in ds]
+      yield [ [d[k] for k in keys] for d in ds ]
+  a = array(list(gen())) # raw results
   indexes = a[:,0,0]
-  means = median(a,1) #a.mean(1)
-  stds = a.std(1)
-  tup = (indexes,)
-  for i in range(1, len(a[0,0])):
-    tup += (means[:,i], stds[:,i])
-  stacked = hstack(map(lambda x: x.reshape((len(indexes),1)), tup))
-  return tup + (stacked, a)
+  means = median(a,1) # or a.mean(1)
+  sds = a.std(1)
+  # Build result dict.
+  stacks = [ (indexkey, indexes) ] # no need to agg the index
+  for i,k in list(enumerate(keys))[1:]: # everything but index
+    stacks.append((k + ' mean', means[:,i]))
+    stacks.append((k + ' sd', sds[:,i]))
+  res = dict(stacks)
+  res['stacked'] = hstack(map(lambda (_,x): x.reshape((len(indexes), 1)), stacks))
+  res['raw'] = a
+  print show_table(stacks)
+  print
+  return res
 
 def scaling(path):
   print '=== scaling ==='
   print 'file:', getname(path)
-  check(path)
-  def getpairs():
-    with file(path) as f:
-      for line in f:
-        m = re.match( r'=== n=(?P<n>\d+) ', line )
-        if m:
-          n = int(m.group('n'))
-        m = re.match( r'.*: issued .*[^.\d](?P<tps>[.\d]+) ?tps', line )
-        if m:
-          tps = float(m.group('tps'))
-          yield (n, tps)
-  tups = agg(getpairs())
-  ns, tpsmeans, tpssds, stacked, a = agg(getpairs())
-  print 'n, tps mean, tps sd'
-  print stacked
-  print
+  res = logextract(path, 'n', [
+    r'=== n=(?P<n>\d+) ',
+    r'issued .*\((?P<tps>[.\d]+) tps\)' ])
 
-  errorbar(ns, tpsmeans, tpssds)
+  errorbar(res['n'], res['tps mean'], res['tps sd'])
   title('Scaling of baseline throughput with number of nodes')
   xlabel('Node count')
   ylabel('Mean TPS (stdev error bars)')
-  xlim(ns.min() - .5, ns.max() + .5)
+  xlim(res['n'].min() - .5, res['n'].max() + .5)
   ylim(ymin = 0)
   savefig('scaling.png')
 
 def run(blockpath, yieldpath):
-  for path, label in [#(blockpath, 'blocking scheme'),
-                      (yieldpath, 'yielding scheme')]:
-    print '===', label, '==='
+  for path, titlestr, name in [#(blockpath, 'blocking scheme', 'block'),
+                               (yieldpath, 'yielding scheme', 'yield')]:
+    print '===', titlestr, '==='
     print 'file:', getname(path)
-    check(path)
-    def getpairs():
-      with file(path) as f:
-        seqno = dump = recv = buildup = catchup = total = None
-        for line in f:
-          m = re.match( r'=== seqno=(?P<seqno>\d+) ', line )
-          if m: seqno = int(m.group('seqno'))
-          m = re.search( r'got recovery message in (?P<dump>\d+) ms \(xfer took (?P<recv>\d+) ms\)', line )
-          if m: dump, recv = float(m.group('dump')), float(m.group('recv'))
-          m = re.search( r'built up .* (?P<time>\d+) ms', line )
-          if m: buildup = float(m.group('time'))
-          m = re.search( r'replayer caught up; from backlog replayed \d+ txns .* in (?P<time>\d+) ms', line )
-          if m: catchup = float(m.group('time'))
-          m = re.match( r'.*: recovering node caught up; took (?P<time>\d+) ?ms', line )
-          if m: total = float(m.group('time'))
-          tup = (seqno, dump, recv, buildup, catchup, total)
-          if all(tup):
-            yield tup
-            seqno = dump = recv = buildup = catchup = total = None
-    seqnos, dumpmeans, dumpsds, recvmeans, recvsds, buildmeans, buildsds, \
-      catchmeans, catchsds, totalmeans, totalsds, stacked, a = \
-      agg(getpairs())
+    res = logextract(path, 'seqno',
+      [ r'=== seqno=(?P<seqno>\d+) ',
+        r'got recovery message of (?P<len>\d+) bytes in (?P<dump>\d+) ms: xfer took (?P<recv>\d+) ms, deserialization took (?P<deser>\d+)',
+        r'built up .* (?P<buildup>\d+) ms',
+        r'generating recovery took (?P<gen>\d+) ms',
+        r'replayer caught up; from backlog replayed \d+ txns .* in (?P<catchup>\d+) ms',
+        r'.*: recovering node caught up; took (?P<total>\d+) ?ms' ] )
 
-    print 'max seqno, dump mean, dump sd, recv mean, recv sd, build mean, build sd, catch mean, catch sd, total mean, total sd'
-    print stacked
-    print
-
+    # Colors and positioning
     width = 5e4
-    # From "zen and tea" on kuler.adobe.com
-    hue = lambda i: tuple(map(lambda x: float(x)/255,
-      [( 16, 34, 43),
-       (149,171, 99),
-       (189,214,132),
-       (226,240,214),
-       (246,255,224)][i+1]))
-    ehue = lambda i: hue(-1) # tuple(map(lambda x: min(1, x + .3), hue(i)))
-    bar(seqnos, dumpmeans, yerr = dumpsds, width = width, color = hue(0),
-        ecolor = ehue(0), label = 'State serialization')
-    bar(seqnos, recvmeans, yerr = recvsds, width = width, color = hue(0),
-        ecolor = ehue(0), label = 'State receive', bottom = dumpmeans)
-    bar(seqnos, buildmeans, yerr = buildsds, width = width, color = hue(1),
-        ecolor = ehue(1), label = 'Build-up',
-        bottom = dumpmeans + recvmeans)
-    bar(seqnos, catchmeans, yerr = catchsds, width = width, color = hue(2),
-        ecolor = ehue(2), label = 'Catch-up',
-        bottom = dumpmeans + recvmeans + buildmeans)
+    step = 1.0 / 5
+    hues = ( colorsys.hls_to_rgb(step * i, .7, .5) for i in itertools.count() )
+    ehues = ( colorsys.hls_to_rgb(step * i, .3, .5) for i in itertools.count() )
+    widths = ( 2 * width - 2 * width / 5 * i for i in itertools.count() )
+    offsets = ( width - 2 * width / 5 * i for i in itertools.count() )
+    self = struct()
+    self.bottom = 0
 
-    title('Recovery time over number of transactions')
-    xlabel('Transaction count (corresponds roughly to data size)')
-    ylabel('Mean time in ms (SD error bars)')
-    legend(loc = 'upper left')
-    savefig('run.png')
+    clf()
+    def mybar(yskey, eskey, label):
+      bar(res['seqno'] - offsets.next(), res[yskey], yerr = res[eskey],
+          width = widths.next(), color = hues.next(), edgecolor = (1,1,1),
+          ecolor = ehues.next(), label = label, bottom = self.bottom)
+      self.bottom += res[yskey]
+    mybar('dump mean', 'dump sd', 'State dump')
+    mybar('recv mean', 'recv sd', 'State receive')
+    mybar('deser mean', 'deser sd', 'State deserialization')
+    mybar('buildup mean', 'buildup sd', 'Build-up')
+    mybar('catchup mean', 'catchup sd', 'Catch-up')
+
+    title('Recovery time of ' + titlestr + ' over data size')
+    xlabel('Transaction count (corresponds roughly to data size)')
+    ylabel('Mean time in ms (SD error bars)')
+    legend(loc = 'upper left')
+
+    ax2 = twinx()
+    col = colorsys.hls_to_rgb(.6, .4, .4)
+    ax2.errorbar(res['seqno'], res['len mean'] / 1024, res['len sd'] / 1024,
+                 marker = 'o', color = col)
+    ax2.set_ylabel('Size of serialized state (KB)', color = col)
+    ax2.set_ylim(ymin = 0)
+    for tl in ax2.get_yticklabels(): tl.set_color(col)
+
+    xlim(xmin = min(res['seqno']) - width, xmax = max(res['seqno']) + width)
+    savefig(name + '.png')
+
 def main(argv):
   if len(argv) <= 1:
     print >> sys.stderr, 'Must specify a command'
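logextract above is the "sloppy regex extraction" the log message mentions:
scan the log once, try each pattern against each line, and emit one record
whenever every pattern has matched since the last emission. Stripped to its
core (a sketch with made-up sample lines, not code from the repo):

    import re

    def extract(lines, pats):
        # Emit one dict per record; a record is complete once every pattern
        # has matched at least once since the last emission.
        caps, sats = {}, [False] * len(pats)
        for line in lines:
            for i, pat in enumerate(pats):
                m = re.search(pat, line)
                if m:
                    caps.update((k, float(v))
                                for k, v in m.groupdict().iteritems())
                    sats[i] = True
                    break
            if all(sats):
                yield caps.copy()
                caps.clear()
                sats = [False] * len(pats)

    log = ['=== seqno=100000 i=1 ===',
           'replica: got recovery message of 833000 bytes in 120 ms',
           '=== seqno=100000 i=2 ===',
           'replica: got recovery message of 831000 bytes in 118 ms']
    pats = [r'=== seqno=(?P<seqno>\d+) ',
            r'got recovery message of (?P<len>\d+) bytes in (?P<dump>\d+) ms']
    for rec in extract(log, pats):
        print rec   # e.g. {'seqno': 100000.0, 'len': 833000.0, 'dump': 120.0}

The appeal of the sloppy approach is that adding a new measurement (like the
deser and gen timings in this commit) only means adding one named-group
pattern, instead of rewriting a hand-rolled per-log parser.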
Modified: ydb/trunk/tools/test.bash
===================================================================
--- ydb/trunk/tools/test.bash	2009-01-26 05:35:42 UTC (rev 1146)
+++ ydb/trunk/tools/test.bash	2009-01-27 23:25:16 UTC (rev 1147)
@@ -9,17 +9,22 @@
 tagssh() {
   ssh "$@" 2>&1 | python -u -c '
-import time, sys
+import time, sys, socket
+# def fmt(*xs): return " ".join(map(str, xs)) + "\n"
+# s = socket.socket()
+# s.connect(("localhost", 9876))
+# f = s.makefile()
+f = sys.stdout
 while True:
   line = sys.stdin.readline()
   if line == "": break
-  print sys.argv[1], time.time(), ":\t", line,
+  print >> f, sys.argv[1], time.time(), ":\t", line,
 ' $1
 }
 
 check-remote() {
-  if [[ ${force:-asdf} != asdf && `hostname` == yang-xps410 ]]
-  then echo 'running a remote command on your pc!' 1>&2 && exit 1
+  if [[ ! ${remote:-} ]]
+  then echo 'running a remote command on your pc!' 1>&2 && exit 1
   fi
 }
 
@@ -129,36 +134,11 @@
   local host="$1"
   shift
   scp -q "$(dirname "$0")/$script" "$host:"
-  tagssh "$host" "./$script" "$@"
+  tagssh "$host" "remote=1 ./$script" "$@"
 }
 
-hosts() {
-  if [[ ${host:-} ]] ; then
-    echo $host
-  elif [[ ${range:-} ]] ; then
-    seq $range | sed 's/^/farm/; s/$/.csail/'
-  else
-    cat << EOF
-farm1.csail
-farm2.csail
-farm3.csail
-farm4.csail
-farm5.csail
-farm6.csail
-farm7.csail
-farm8.csail
-farm9.csail
-farm10.csail
-farm11.csail
-farm12.csail
-farm13.csail
-farm14.csail
-EOF
-  fi
-}
-
 parhosts() {
-  hosts | xargs ${xargs--P9} -I^ "$@"
+  echo -n $hosts | xargs ${xargs--P9} -d' ' -I^ "$@"
 }
 
 parssh() {
@@ -170,6 +150,7 @@
 }
 
 parremote() {
+  export hosts range
   parhosts "./$script" remote ^ "$@"
 }
 
@@ -235,7 +216,7 @@
 "
 }
 
-hosttops() {
+tops() {
   xargs= parssh "
     echo
     hostname
   "
 }
 
 hostargs() {
-  if [[ $range ]]
-  then "$@" $(seq $range | sed 's/^/farm/; s/$/.csail/')
-  else "$@" ${hosts[@]}
-  fi
+  "$@" $hosts
 }
 
 scaling-helper() {
   local leader=$1
   shift
-  tagssh $leader "ydb/src/ydb -l -n $#" &
+  tagssh $leader "ydb/src/ydb -l -n $# -X 100000" &
   sleep .1
   for rep in "$@"
   do tagssh $rep "ydb/src/ydb -n $# -H $leader" &
   done
-  sleep ${wait1:-10}
-  tagssh $leader 'pkill -sigint ydb'
   wait
 }
 
@@ -274,21 +250,23 @@
 # TODO: fix this to work also with `hosts`; move into repeat-helper that's run
 # via hostargs, and change the range= to hosts=
 full-scaling() {
-  local base=$1 out=scaling-log-$(date +%Y-%m-%d-%H:%M:%S-%N)
-  shift
+  local out=scaling-log-$(date +%Y-%m-%d-%H:%M:%S-%N)
+  local orighosts="$hosts" maxn=$(( $(echo $hosts | wc -w) - 1 ))
   ln -sf $out scaling-log
-  for n in {1..5} ; do # configurations
-    export range="$base $((base + n))"
+  for n in `seq $maxn -1 1` ; do # configurations
     stop
     for i in {1..5} ; do # trials
       echo === n=$n i=$i ===
+      echo === n=$n i=$i === > `tty`
      scaling
       sleep 1
       stop
       sleep .1
       echo
     done
+    hosts="${hosts% *}"
   done >& $out
+  hosts="$orighosts"
 }
 
 run-helper() {
@@ -324,6 +302,7 @@
     stop
     for i in {1..5} ; do # trials
       echo === seqno=$seqno i=$i ===
+      echo === seqno=$seqno i=$i === > `tty`
       run
       sleep 1
       stop
 
@@ -342,15 +321,9 @@
 full-yield() {
   local out=yield-log-$(date +%Y-%m-%d-%H:%M:%S)
   ln -sf $out yield-log
-  extraargs='--yield-catch-up' full-run >& $out
+  extraargs="--yield-catch-up ${extraargs:-}" full-run >& $out
 }
 
-full() {
-  full-block
-  full-yield
-  full-scaling
-}
-
 stop-helper() {
   tagssh $1 'pkill -sigint ydb'
 }
 
@@ -375,8 +348,32 @@
 # Use mssh to log in with password as root to each machine.
 mssh-root() {
-  : "${hosts:="$(hosts)"}"
   mssh -l root "$@"
 }
 
-"$@"
+# Set up hosts.
+confighosts() {
+  if [[ ! ${remote:-} ]] ; then
+    if [[ ! "${hosts:-}" && ! "${range:-}" ]]
+    then range='1 14'; echo "warning: running with farms 1..14" 1>&2
+    fi
+    if [[ "${range:-}" ]]
+    then hosts="$( seq $range | sed 's/^/farm/' )"
+    fi
+    hosts="$( echo -n $hosts )"
+  fi
+}
+
+# Set up logger.
+configlogger() {
+  if [[ ! ${remote:-} ]] ; then
+    (
+      flock -n /tmp/ydbtest.socket
+    ) > /tmp/y
+  fi
+}
+
+confighosts
+#configlogger
+
+eval "$@"
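tagssh above prefixes every line of remote output with a host label and a
wall-clock timestamp, which is what makes the README's "serialize outputs from
the various clients to a single merger" TODO feasible. A hypothetical merger
(Python 2, not part of this commit) could simply sort the tagged lines:

    from __future__ import with_statement
    import sys

    def parse(line):
        # tagssh lines look like: "<host> <unix-time> :\t <original line>"
        host, ts, rest = line.split(None, 2)
        return float(ts), host, rest

    def merge(paths):
        # Gather every tagged line from every per-host log, then order the
        # whole stream by timestamp (host name breaks ties).
        tagged = []
        for path in paths:
            with open(path) as f:
                tagged.extend(parse(line) for line in f)
        tagged.sort()
        for ts, host, rest in tagged:
            sys.stdout.write('%s %f %s' % (host, ts, rest))

    if __name__ == '__main__':
        merge(sys.argv[1:])

Of course, the resulting order is only as trustworthy as the hosts' clock
sync, which is exactly why "see how much clock difference there is among the
hosts" is also on the TODO list.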