Thread: [Assorted-commits] SF.net SVN: assorted: [267] bib/trunk/sam.bib
                
                Brought to you by:
                
                    yangzhang
                    
                
            
            
        
        
        
    | 
      
      
      From: <yan...@us...> - 2008-01-20 18:26:47
       | 
| Revision: 267
          http://assorted.svn.sourceforge.net/assorted/?rev=267&view=rev
Author:   yangzhang
Date:     2008-01-20 10:26:49 -0800 (Sun, 20 Jan 2008)
Log Message:
-----------
added Sam's bib
Added Paths:
-----------
    bib/trunk/sam.bib
Added: bib/trunk/sam.bib
===================================================================
--- bib/trunk/sam.bib	                        (rev 0)
+++ bib/trunk/sam.bib	2008-01-20 18:26:49 UTC (rev 267)
@@ -0,0 +1,2594 @@
+%% DJA's bibtex file
+
+%% Daniel Abadi's publications in bibtex format
+
+%% Conference and Journal Papers
+
+@inproceedings{abadi-rdf,
+	title = "Scalable Semantic Web Data Management Using Vertical Partitioning",
+	year = "2007",
+        booktitle = "VLDB",
+        address = "Vienna, Austria",
+	venue = "VLDB",
+	pdfURL = "http://web.mit.edu/dna/www/abadirdf.pdf",
+	author = {Daniel J. Abadi and Adam Marcus and Samuel R. Madden and Kate Hollenbach},
+	abstract = "Efficient management of RDF data is an important factor in realizing the Semantic Web vision. Performance and scalability issues are becoming increasingly pressing as Semantic Web technology is applied to real-world applications. In this paper, we examine the reasons why current data management solutions for RDF data scale poorly, and explore the fundamental scalability limitations of these approaches. We review the state of the art for improving performance for RDF databases and consider a recent suggestion, 'property tables'. We then discuss practically and empirically why this solution has undesirable features. As an improvement, we propose an alternative solution: vertically partitioning the RDF data. We compare the performance of vertical partitioning with prior art on queries generated by a Web-based RDF browser over a large-scale (more than 50 million triples) catalog of library data.  Our results show that a vertical partitioned schema achieves similar performance to the property table technique while being much simpler to design. Further, if a column-oriented DBMS (a database architected specially for the vertically partitioned case) is used instead of a row-oriented DBMS, another order of magnitude performance improvement is observed, with query times dropping from minutes to several seconds.",
+	pdfKB = "246",
+	publicationtype = "Conference Paper",
+}
+
+@inproceedings{hstore,
+  author    = {Michael Stonebraker and Samuel R. Madden and Daniel J. Abadi and Stavros Harizopoulos and Nabil Hachem and Pat Helland},
+  title     = {The End of an Architectural Era (It's Time for a Complete Rewrite)},
+  booktitle = {VLDB},
+  year      = {2007},
+  address = {Vienna, Austria},
+  venue = "VLDB",
+  pdfURL = "http://web.mit.edu/dna/www/vldb07hstore.pdf",
+  abstract = "In previous papers, some of us predicted the end of 'one size fits all' as a commercial relational DBMS paradigm. These papers presented reasons and experimental evidence that showed that the major relational RDBMS vendors can be outperformed by 1-2 orders of magnitude by specialized engines in the data warehouse, stream processing, text, and scientific database markets. Assuming that specialized engines dominate these markets over time, the current relational DBMS code lines will be left with the business data processing (OLTP) market and hybrid markets where more than one capability is required. In this paper we show that current RDBMSs can be beaten by nearly two orders of magnitude in the OLTP market as well. The experimental evidence comes from comparing a new OLTP prototype, H-Store, which we have built at M.I.T., to a popular RDBMS on the standard transactional benchmark, TPC-C. We conclude that current RDBMS code lines, while attempting to be a 'one size fits all' solution, in fact, excel at nothing. Hence, they are 25 year old legacy code lines that should be retired in favor of a collection of 'from scratch' specialized engines. The DBMS vendors (and research community) should start with a clean sheet of paper and design systems for tomorrow's requirements, not continue to push code lines and architectures designed for yesterday's needs.",
+  pdfKB = "444",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{abadi-cidr,
+  author    = {Daniel J. Abadi},
+  title     = {Column Stores for Wide and Sparse Data},
+  booktitle = {CIDR},
+  year      = {2007},
+  address  = {Asilomar, CA, USA},
+  venue = "CIDR",
+  pdfURL = "http://web.mit.edu/dna/www/abadicidr07.pdf",
+  abstract = "While it is generally accepted that data warehouses and OLAP workloads are excellent applications for column-stores, this paper speculates that column-stores may well be suited for additional applications. In particular we observe that column-stores do not see a performance degradation when storing extremely wide tables, and column-stores handle sparse data very well. These two properties lead us to conjecture that column-stores may be good storage layers for Semantic Web data, XML data, and data with GEM-style schemas.",
+  pdfKB = "156",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{cstore-mat,
+  author    = {Daniel J. Abadi and Daniel S. Myers and David J. DeWitt and Samuel R. Madden},
+  title     = {Materialization Strategies in a Column-Oriented {DBMS}},
+  booktitle = {ICDE},
+  year      = {2007},
+  address  = {Istanbul, Turkey},
+  pages = {466--475},
+  venue = "ICDE",
+  pdfURL = "http://web.mit.edu/dna/www/abadiicde2007.pdf",
+  abstract = "There has been renewed interest in column-oriented database architectures in recent years. For read-mostly query workloads such as those found in data warehouse and decision support applications, column-stores have been shown to perform particularly well relative to row-stores. In order for column-stores to be readily adopted as a replacement for row-stores, however, they must present the same interface to client applications as do row stores, which implies that they must output row-store-style tuples. Thus, the input columns stored on disk must be converted to rows at some point in the query plan, but the optimal point at which to do the conversion is not obvious. This problem can be considered as the opposite of the projection problem in row-store systems: while row-stores need to determine where in query plans to place projection operators to make tuples narrower, column-stores need to determine when to combine single-column projections into wider tuples. This paper describes a variety of strategies for tuple construction and intermediate result representations and provides a systematic evaluation of these strategies.",
+  pdfKB = "327",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{cstore-perf,
+  author = {Stavros Harizopoulos and Velen Liang and Daniel J. Abadi and Samuel R. Madden},
+  title = {Performance Tradeoffs in Read-Optimized Databases},
+  booktitle = {VLDB},
+  year = {2006},
+  pages = {487--498},
+  address = {Seoul, Korea},
+  venue = "VLDB",
+  pdfURL = "http://web.mit.edu/dna/www/VLDB06.pdf",
+  abstract = "Database systems have traditionally optimized performance for write-intensive workloads. Recently, there has been renewed interest in architectures that optimize read performance by using column-oriented data representation and light-weight compression. This previous work has shown that under certain broad classes of workloads, column-based systems can outperform row-based systems. Previous work, however, has not characterized the precise conditions under which a particular query workload can be expected to perform better on a column-oriented database. In this paper we first identify the distinctive components of a read-optimized DBMS and describe our implementation of a high-performance query engine that can operate on both row and column-oriented data. We then use our prototype to perform an in-depth analysis of the tradeoffs between column and row-oriented architectures. We explore these tradeoffs in terms of disk bandwidth, CPU cache latency, and CPU cycles. We show that for most database workloads, a carefully designed column system can outperform a carefully designed row system, sometimes by an order of magnitude. We also present an analytical model to predict whether a given workload on a particular hardware configuration is likely to perform better on a row or column-based system.",
+  pdfKB = "354",
+  publicationtype = "Conference Paper",
+ }
+
+@inproceedings{cstore-comp,
+  author    = {Daniel J. Abadi and Samuel R. Madden and Miguel Ferreira},
+  title     = {Integrating Compression and Execution in Column-Oriented Database Systems},
+  booktitle = {SIGMOD},
+  year      = {2006},
+  address   = {Chicago, IL, USA},
+  pages     = {671--682},
+  venue = "SIGMOD",
+  pdfURL = "http://web.mit.edu/dna/www/abadisigmod06.pdf",
+  abstract = "Column-oriented database system architectures invite a re-evaluation of how and when data in databases is compressed. Storing data in a column-oriented fashion greatly increases the similarity of adjacent records on disk and thus opportunities for compression. The ability to compress many adjacent tuples at once lowers the per-tuple cost of compression, both in terms of CPU and space overheads. In this paper, we discuss how we extended C-Store (a column-oriented DBMS) with a compression sub-system. We show how compression schemes not traditionally used in row-oriented DBMSs can be applied to column-oriented systems.  We then evaluate a set of compression schemes and show that the best scheme depends not only on the properties of the data but also on the nature of the query workload.",
+  pdfKB = "265",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{reed,
+  author    = {Daniel J. Abadi and Samuel R. Madden and Wolfgang Lindner},
+  title     = {REED: Robust, Efficient Filtering and Event Detection in Sensor Networks},
+  booktitle = {VLDB},
+  year      = {2005},
+  address   = {Trondheim, Norway},
+  pages     = {769--780},
+  venue = "VLDB",
+  pdfURL = "http://web.mit.edu/dna/www/abadivldb05.pdf",
+  abstract = "This paper presents a set of algorithms for efficiently evaluating join queries over static data tables in sensor networks. We describe and evaluate three algorithms that take advantage of distributed join techniques. Our algorithms are capable of running in limited amounts of RAM, can distribute the storage burden over groups of nodes, and are tolerant to dropped packets and node failures. REED is thus suitable for a wide range of event-detection applications that traditional sensor network database and data collection systems cannot be used to implement.",
+  pdfKB = "292",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{cstore,
+  author    = {Michael Stonebraker and Daniel J. Abadi and Adam Batkin and Xuedong Chen and Mitch Cherniack and Miguel Ferreira and Edmond Lau and Amerson Lin and Samuel R. Madden and Elizabeth J. O'Neil and Patrick E. O'Neil and Alexander Rasin and Nga Tran and Stan B. Zdonik},
+  title     = {{C-Store}: A Column-Oriented {DBMS}},
+  booktitle = {VLDB},
+  year      = {2005},
+  address   = {Trondheim, Norway},
+  pages     = {553--564},
+  venue = "VLDB",
+  pdfURL = "http://web.mit.edu/dna/www/vldb.pdf",
+  abstract = "This paper presents the design of a read-optimized relational DBMS that contrasts sharply with most current systems, which are write-optimized. Among the many differences in its design are: storage of data by column rather than by row, careful coding and packing of objects into storage including main memory during query processing, storing an overlapping collection of column-oriented projections, rather than the current fare of tables and indexes, a non-traditional implementation of transactions which includes high availability and snapshot isolation for read-only transactions, and the extensive use of bitmap indexes to complement B-tree structures. We present preliminary performance data on a subset of TPC-H and show that the system we are building, C-Store, is substantially faster than popular commercial products. Hence, the architecture looks very encouraging.",
+  pdfKB = "170",
+  publicationtype = "Conference Paper",
+}
+
+@inproceedings{borealis,
+  author    = {Daniel J. Abadi and Yanif Ahmad and Magdalena Balazinska and Ugur Cetintemel and Mitch Cherniack and Jeong-Hyon Hwang and Wolfgang Lindner and Anurag S. Maskey and Alexander Rasin and Esther Ryvkina and Nesime Tatbul and Ying Xing and Stan B. Zdonik},
+  title     = {The Design of the Borealis Stream Processing Engine},
+  booktitle = {CIDR},
+  year      = {2005},
+  address  = {Asilomar, CA, USA},
+  venue = "CIDR",
+  pdfURL = "http://web.mit.edu/dna/www/cidr05.pdf",
+  abstract = "Borealis is a second-generation distributed stream processing engine that is being developed at Brandeis University, Brown University, and MIT. Borealis inherits core stream processing functionality from Aurora and distribution functionality from Medusa. Borealis modifies and extends both systems in non-trivial and critical ways to provide advanced capabilities that are commonly required by newly-emerging stream processing applications. In this paper, we outline the basic design and functionality of Borealis. Through sample real-world applications, we motivate the need for dynamically revising query results and modifying query specifications. We then describe how Borealis addresses these challenges through an innovative set of features, including revision records, time travel, and control lines. Finally, we present a highly flexible and scalable QoS-based optimization model that operates across server and sensor networks and a new fault-tolerance model with flexible consistency-availability trade-offs.",
+  pdfKB = "143",
+  publicationtype = "Conference Paper",
+}
+
+@article{aurora,
+  author = {Daniel J. Abadi and Don Carney and Ugur Cetintemel and Mitch Cherniack and Christian Convey and Sangdon Lee and Michael Stonebraker and Nesime Tatbul and Stan B. Zdonik},
+  title = {Aurora: A New Model and Architecture for Data Stream Management},
+  journal = {VLDB Journal},
+  volume = {12},
+  number = {2},
+  month = sep,
+  year = {2003},
+  pages = {120--139},
+  venue = {VLDB Journal},
+  pdfURL = "http://web.mit.edu/dna/www/vldb095.pdf",
+  abstract = "This paper describes the basic processing model and architecture of Aurora, a new system to manage data streams for monitoring applications. Monitoring applications differ substantially from conventional business data processing. The fact that a software system must process and react to continual inputs from many sources (e.g., sensors) rather than from human operators requires one to rethink the fundamental architecture of a DBMS for this application area. In this paper, we present Aurora, a new DBMS currently under construction at Brandeis University, Brown University, and M.I.T. We first provide an overview of the basic Aurora model and architecture and then describe in detail a stream-oriented set of operators.",
+  pdfKB = "984",
+  publicationtype = "Journal Article",
+}
+
+%% Technical Reports
+
+@techreport{barton-benchmark,
+  title = {Using The Barton Libraries Dataset As An RDF Benchmark},
+  number = {MIT-CSAIL-TR-2007-036},
+  institution = {MIT},
+  year      = {2007},
+  author = {Daniel J. Abadi and Adam Marcus and Samuel R. Madden and Kate Hollenbach},
+  pdfURL = "http://web.mit.edu/dna/www/bench.pdf",
+  abstract = "This report describes the Barton Libraries RDF dataset and Longwell query benchmark that we use for our recent VLDB paper on Scalable Semantic Web Data Management Using Vertical Partitioning",
+  pdfKB = "357",
+  publicationtype = "Technical Report",
+}
+
+%% Demonstrations
+
+@misc{sensor-stream-integration,
+  author    = {Daniel J. Abadi and Wolfgang Lindner and Samuel R. Madden and Jorg Schuler},
+  title     = {An Integration Framework for Sensor Networks and Data Stream Management Systems},
+  howpublished = {Demonstration. VLDB},
+  year      = {2004},
+  pages     = {1361--1364},
+  address   = {Toronto, Canada},
+  venue = "VLDB",
+  pdfURL = "http://web.mit.edu/dna/www/vldb04.pdf",
+  abstract = "This demonstration shows an integrated query processing environment where users can seamlessly query both a data stream management system and a sensor network with one query expression. By integrating the two query processing systems, the optimization goals of the sensor network (primarily power) and server network (primarily latency and quality) can be unified into one quality of service metric.",
+  pdfKB = "116",
+  publicationtype = "Demonstration",
+}
+
+@misc{aurora-demo,
+  author    = {Daniel J. Abadi and Don Carney and Ugur Cetintemel and Mitch Cherniack and Christian Convey and Christina Erwin and Eddie Galvez and Matt Hatoun and Jeong-Hyon Hwang and Anurag S. Maskey and Alexander Rasin and A. Singer and Michael Stonebraker and Nesime Tatbul and Ying Xing and R. Yan and Stan B. Zdonik},
+  title     = {Aurora: A Data Stream Management System},
+  howpublished = {Demonstration. SIGMOD},
+  year      = {2003},
+  pages     = {666},
+  address   = {San Diego, CA, USA},
+  venue = "SIGMOD",
+  pdfURL = "http://web.mit.edu/dna/www/AuroraDemo.pdf",
+  abstract = "Streams are continuous data feeds generated by such sources as sensors, satellites, and stock feeds. Monitoring applications track data from numerous streams, filtering them for signs of abnormal activity, and processing them for purposes of filtering, aggregation, reduction, and correlation. Aurora is a general-purpose data stream manager that is being designed and implemented (at Brandeis University, Brown University, and M.I.T.) to efficiently support a variety of real-time monitoring applications.",
+  pdfKB = "151",
+  publicationtype = "Demonstration",
+}
+
+@misc{vcoko,
+  author    = {Daniel J. Abadi and Mitch Cherniack},
+  title     = {Visual COKO: A Debugger for Query Optimizer Development},
+  howpublished = {Demonstration. SIGMOD},
+  year      = {2002},
+  address  = {Madison, Wisconsin},
+  pages    = {617--617},
+  venue = "SIGMOD",
+  pdfURL = "http://web.mit.edu/dna/www/vcoko.pdf",
+  abstract = "Query optimization generates plans to retrieve data requested by queries. Query rewriting, which is the first step of this process, rewrites a query expression into an equivalent form to prepare it for plan generation. COKO-KOLA introduced a new approach to query rewriting that enables query rewrites to be formally verified using an automated theorem prover. KOLA is a language for expressing term rewriting rules that can be 'fired' on query expressions. COKO is a language for expressing query rewriting transformations that are too complex to express with simple KOLA rules. COKO is a programming language designed for query optimizer development. Programming languages require debuggers, and in this demonstration, we illustrate our COKO debugger: Visual COKO. Visual COKO enables a query optimization developer to visually trace the execution of a COKO transformation. At every step of the transformation, the developer can view a tree-display that illustrates how the original query expression has evolved.",
+  pdfKB = "111",
+  publicationtype = "Demonstration",
+}
+
+%% Theses
+
+@misc{abadi-anaphora,
+  author    = {Daniel J. Abadi},
+  title     = {Comparing Domain-Specific and Non-Domain-Specific Anaphora Resolution Techniques},
+  howpublished = {Cambridge University MPhil Dissertation},
+  year      = {2003},
+  pdfURL = "http://web.mit.edu/dna/www/FinalMPhil.pdf",
+  abstract = "Three different pronominal anaphora resolution techniques are examined. The first two techniques compare traditional salience-based approaches when different amounts of syntactic information are available. The improvement in pronoun resolution precision is quantified when a large scale grammar is used to extract detailed syntactic information rather than inferring this information robustly using pattern matching. The third technique uses domain knowledge instead of syntactic information to resolve pronouns. The domain knowledge required for this algorithm can be automatically acquired from a database backend schema representation of the domain. Each of these three techniques is evaluated separately, and then the domain-specific and non-domain-specific algorithms are combined and evaluated.",
+  pdfKB = "164",
+  publicationtype = "Thesis",
+}
+
+@misc{abadi-ugrad-thesis,
+  author    = {Daniel J. Abadi},
+  title     = {Visual COKO: A Debugger for Query Optimizer Development},
+  howpublished = {Brandeis University Senior Honors Thesis},
+  year      = {2002},
+  pdfURL = "http://web.mit.edu/dna/www/VisualCOKOThesis.pdf",
+  abstract = "Query optimization generates plans to retrieve data requested by queries, and query rewriting (rewriting a query expression into an equivalent form to prepare it for plan generation) is typically the first step. COKO-KOLA introduced a new approach to query rewriting that enables query rewrites to be formally verified using an automated theorem prover. KOLA is a language for expressing rewriting rules that can be fired on query expressions. COKO is a language for expressing query rewriting transformations that are too complex to express with simple KOLA rules. COKO is a programming language designed for query optimizer development. Programming languages require debuggers, and this paper describes a COKO debugger: Visual COKO. Visual COKO enables a query optimization developer to visually trace the execution of a COKO transformation. At every step of the transformation, the developer can view a tree-display that illustrates how the original query expression has evolved. Rule-based query rewriting and the COKO-KOLA project are described for background. Then the COKO syntax is summarized from the point of view of the COKO programmer using an example query transformation that converts query predicates to conjunctive normal form. Visual COKO is described and instructions for its use are presented. Finally, a description of its implementation is given.",
+  pdfKB = "132",
+  publicationtype = "Thesis",
+}
+
+
+@inproceedings{james-reserve,
+    Year = {2001},
+    Title = {Habitat monitoring: Application driver for wireless communications technology},
+    Booktitle = {{ACM SIGCOMM} Workshop on Data Communications in Latin America and the Caribbean},
+    Author = {A. Cerpa and J. Elson and D. Estrin and L. Girod and M. Hamilton and J. Zhao}}
+
+@misc{xmlql,
+    Year = {1998},
+    Title = {{XML-QL}: A Query Language for {XML}},
+    Author = {A. Deutsch and M. Fernandez and D. Florescu and A. Levy and D. Suciu},
+    Note = {http://www.w3.org/TR/NOTE-xml-ql}}
+
+@inproceedings{doubly-pipelined,
+    Month = dec,
+    Year = {1991},
+    Title = {Dataflow Query Execution in a Parallel Main-Memory Environment},
+    Pages = {68--77},
+    Booktitle = {Proc. of the International Conference on Parallel and Distributed Information Systems ({PDIS})},
+    Author = {A. N. Wilschut and P. M. G. Apers}}
+
+@inproceedings{storm-petrels,
+    Year = {2002},
+    Title = {Wireless Sensor Networks for Habitat Monitoring},
+    Booktitle = {{ACM Workshop on Sensor Networks and Applications}},
+    Author = {Alan Mainwaring and Joseph Polastre and Robert Szewczyk and David Culler}}
+
+@inproceedings{awoo-tos,
+    Month = jul,
+    Year = {2001},
+    Title = {A Transmission Control Scheme for Media Access in Sensor Networks},
+    Booktitle = {ACM Mobicom},
+    Author = {Alec Woo and David Culler}}
+
+@inproceedings{parallel-agg-wisc,
+    Year = {1995},
+    Title = {Adaptive Parallel Aggregation Algorithms},
+    Booktitle = {ACM SIGMOD},
+    Author = {Ambuj Shatdal and Jeffrey Naughton}}
+
+@manual{postgres-manual,
+    Organization = {UC Berkeley},
+    Year = {1995},
+    Title = {The POSTGRES95 User Manual},
+    Optnote = {http://www.uoi.gr/services/noc/documentation/postgress/pg95user.html},
+    Author = {Andrew Yu and Jolly Chen}}
+
+@inproceedings{p2prouting,
+    Month = jul,
+    Year = {2002},
+    Title = {Routing Indices For Peer-to-Peer Systems},
+    Booktitle = {ICDCS},
+    Author = {Arturo Crespo and Hector {Garcia-Molina}}}
+
+@inproceedings{gupta-aggs,
+    Year = {1995},
+    Title = {Aggregate Query Processing in Data Warehousing Environments},
+    Booktitle = {VLDB},
+    Author = {Ashish Gupta and Venky Harinarayan and Dallan Quass}}
+
+@inproceedings{loop-detectors,
+    Year = {1998},
+    Title = {Vehicle Reidentification and Travel Time Measurement Using the Existing Loop Detector Infrastructure},
+    Booktitle = {Transportation Research Board},
+    Author = {Ben Coifman}}
+
+@inproceedings{mit-span,
+    Month = jul,
+    Year = {2001},
+    Title = {Span: An Energy-Efficient Coordination Algorithm for Topology Maintenance in Ad-Hoc Wireless Networks},
+    Booktitle = {ACM MobiCom},
+    Author = {Benjie Chen and Kyle Jamieson and Hari Balakrishnan and Robert Morris}}
+
+@inproceedings{dquob,
+    Year = {2000},
+    Title = {{dQUOB}: Managing Large Data Flows Using Dynamic Embedded Queries},
+    Booktitle = {Proceedings of High Performance Distributed Computing},
+    Author = {Beth Plale and Karsten Schwan}}
+
+@inproceedings{timeseriessequence,
+    Month = may,
+    Year = {1994},
+    Title = {Fast Subsequence Matching in Time-Series Databases},
+    Pages = {419--422},
+    Booktitle = {Proceedings of the ACM SIGMOD 1994},
+    Author = {C. Faloutsos and M. Ranganathan and Y. Manolopoulos}}
+
+@article{monmasidney,
+    Journal = {Mathematics of Operations Research},
+    Year = {1979},
+    Title = {Sequencing with Series-Parallel Precedence Constraints},
+    Author = {C. L. Monma and J. B. Sidney}}
+
+@inproceedings{cbe-hvac,
+    Year = {2002},
+    Title = {Multi-Sensor Single Actuator Control of {HVAC} Systems},
+    Booktitle = {International Conference for Enhanced Building Operations},
+    Author = {C. Lin and C. Federspiel and D. Auslander}}
+
+@inproceedings{streamjoin,
+    Year = {2000},
+    Title = {Streamjoin: a generic database approach to support the class of stream-oriented applications},
+    Pages = {83--91},
+    Booktitle = {Database Engineering and Applications Symposium},
+    Author = {C. Nippl and R. Rantzau and B. Mitschang}}
+
+@inproceedings{approx-caching,
+    Booktitle = {SIGMOD},
+    Year = {2002},
+    Title = {Best Effort Cache Synchronization with Source Cooperation},
+    Author = {C. Olston and J. Widom}}
+
+@inproceedings{lottery-scheduler,
+    Year = {1994},
+    Title = {Lottery Scheduling: Flexible Proportional-Share Resource Management},
+    Address = {Monterey, CA, USA},
+    Pages = {1--12},
+    Booktitle = {Proceedings of the Symposium on Operating Systems Design and Implementation},
+    Author = {Carl Waldspurger and William Weihl}}
+
+@book{traffic-engineering,
+    Year = {1994},
+    Title = {Traffic and Highway Engineering for Developments},
+    Address = {Oxford},
+    Publisher = {Blackwell Scientific Publications},
+    Author = {Carol Ashley}}
+
+@inproceedings{directed-diffusion,
+    Month = aug,
+    Year = {2000},
+    Title = {Directed Diffusion: A Scalable and Robust Communication Paradigm for Sensor Networks},
+    Address = {Boston, {MA}},
+    Booktitle = {{MobiCOM}},
+    Author = {Chalermek Intanagonwiwat and Ramesh Govindan and Deborah Estrin}}
+
+@unpublished{ucla-aggregation,
+    Year = {2001},
+    Title = {Impact of network density on data aggregation in wireless sensor networks},
+    Author = {Chalermek Intanagonwiwat and Deborah Estrin and Ramesh Govindan and John Heidemann},
+    Note = {ICDCS-22},
+    Month = nov}
+
+@unpublished{UW-api,
+    Year = {2001},
+    Title = {{UW-API: A Network Routing Application Programmer's Interface}},
+    Author = {P. Ramanathan and K. Saluja and K-C. Wang and T. Clouqueur},
+    Note = {Draft version 1.0},
+    Month = jan}
+
+@inproceedings{aodv,
+    Year = {1999},
+    Title = {Ad-hoc On-Demand Distance Vector Routing},
+    Booktitle = {{Workshop on Mobile Computing and Systems Applications}},
+    Author = {Charles E. Perkins and Elizabeth M. Royer}}
+
+@misc{xbow,
+    Title = {Wireless Sensor Networks (Mica Motes)},
+    Author = {{Crossbow, Inc.}},
+    Note = {http://www.xbow.com/Products/Wireless{\_}Sensor{\_}Networks.htm}}
+
+@inproceedings{uclasensors,
+    Month = may,
+    Year = {2001},
+    Title = {Instrumenting the World with Wireless Sensor Networks},
+    Address = {Salt Lake City, Utah},
+    Booktitle = {International Conference on Acoustics, Speech, and Signal Processing ({ICASSP} 2001)},
+    Author = {D. Estrin and L. Girod and G. Pottie and M. Srivastava}}
+
+@techreport{dstottstreamsoverview,
+    Year = {1989},
+    Title = {Stream Databases},
+    Institution = {UCLA},
+    Author = {D. Stott Parker},
+    Note = {Final Report for NSF Grant IRI 89-17907}}
+
+@article{nj-data-report,
+    Journal = {Data Engineering Bulletin},
+    Year = {1997},
+    Title = {The {New Jersey} Data Reduction Report},
+    Number = {4},
+    Pages = {3--45},
+    Author = {Daniel Barbar\'a and William DuMouchel and Christos Faloutsos and Peter J. Haas and Joseph M. Hellerstein and Yannis E. Ioannidis and H.V. Jagadish and Theodore Johnson and Raymond T. Ng and Viswanath Poosala and Kenneth A. Ross and Kenneth C. Sevcik},
+    Volume = {20}}
+
+@inproceedings{dewitt-bandjoin,
+    Year = {1991},
+    Title = {An Evaluation of Non-Equijoin Algorithms},
+    Address = {Barcelona, Spain},
+    Booktitle = {Proceedings of the 17th Conference on Very Large Databases},
+    Author = {David DeWitt and Jeffrey Naughton and Donovan Schneider}}
+
+
+@article{active-networks,
+    Journal = {{IEEE Communications}},
+    Year = {1997},
+    Title = {A Survey of Active Network Research},
+    Author = {David L. Tennenhouse and Jonathan M. Smith and W. David Sincoskie and David J. Wetherall and Gary J. Minden}}
+
+@unpublished{deepak-paper,
+    Year = {2002},
+    Title = {Complex Behavior at Scale: An Experimental Study of Low-Power Wireless Sensor Networks},
+    Author = {Deepak Ganesan and Bhaskar Krishnamachari and Alec Woo and David Culler and Deborah Estrin and Stephen Wickera},
+    Note = {Under submission. Available at: http://lecs.cs.ucla.edu/~deepak/PAPERS/empirical.pdf},
+    Month = ...
 
[truncated message content] |