From: Friedrich L. <fr...@us...> - 2004-06-14 23:16:18
|
Update of /cvsroot/ipac-ng/ipac-ng
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv849

Modified Files:
        CHANGELOG fetchipac.8
Log Message:
put sqlite database file always into the ipac datadir /var/lib/ipac

Index: fetchipac.8
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/fetchipac.8,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- fetchipac.8	13 Jun 2004 01:04:17 -0000	1.5
+++ fetchipac.8	14 Jun 2004 23:16:09 -0000	1.6
@@ -64,7 +64,7 @@
 .IR methods .
 The storage methods provided are:
 .\" =()<.I @<STORAGEMETHODS>@.>()=
-.I gdbm plain-file postgre.
+.I gdbm sqlite plain-file postgre .
 The default storage method used is
 .\" =()<.IR @<DEFAULT_STORAGE>@.>()=
 .IR gdbm.

Index: CHANGELOG
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/CHANGELOG,v
retrieving revision 1.21
retrieving revision 1.22
diff -u -d -r1.21 -r1.22
--- CHANGELOG	13 Jun 2004 01:14:28 -0000	1.21
+++ CHANGELOG	14 Jun 2004 23:16:09 -0000	1.22
@@ -18,6 +18,8 @@
 #
 1.29
+- in case of the sqlite database the database file will be placed at
+  /var/lib/ipac with the selected database name plus the suffix ".db" (friedl)
 - added support for the sqlite database backend (friedl/Simon Hausman)
 - factor out common code for all sql backends (friedl + Simon Hausman)
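The commit above fixes the sqlite database file location to the ipac datadir: the file ends up as /var/lib/ipac/<dbname>.db. The following minimal, standalone C sketch only illustrates that path layout; the helper and variable names are assumptions, not code from fetchipac itself.

/* Illustrative sketch only, not project code: builds "<datadir>/<dbname>.db",
 * e.g. /var/lib/ipac/ipac.db, as described in the log message above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *sqlite_db_path(const char *datadir, const char *dbname)
{
    /* "<datadir>" + "/" + "<dbname>" + ".db" + terminating NUL */
    size_t len = strlen(datadir) + 1 + strlen(dbname) + strlen(".db") + 1;
    char *path = malloc(len);
    if (path == NULL)
        return NULL;
    snprintf(path, len, "%s/%s.db", datadir, dbname);
    return path;
}

int main(void)
{
    char *p = sqlite_db_path("/var/lib/ipac", "ipac");
    printf("%s\n", p ? p : "(allocation failed)");  /* -> /var/lib/ipac/ipac.db */
    free(p);
    return 0;
}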
From: Friedrich L. <fr...@us...> - 2004-06-14 23:05:58
|
Update of /cvsroot/ipac-ng/ipac-ng
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv25666

Modified Files:
        TODO
Log Message:
update TODO for 1.29

Index: TODO
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/TODO,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- TODO	9 May 2004 23:16:56 -0000	1.5
+++ TODO	14 Jun 2004 23:05:49 -0000	1.6
@@ -23,8 +23,7 @@
 - create documentation document with plain-file storage method description
-Things to do for ipac-ng 1.29:
+Things to do for ipac-ng 1.30:
-- add support for SQLite database backend
 - add support for MySQL database backend
From: Friedrich L. <fr...@us...> - 2004-06-13 01:14:42
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/sqlite In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21070/storage/sqlite Added Files: Makefile.in sqlite.c Log Message: factor out common code for all sql backends added support for the sqlite database backend --- NEW FILE: Makefile.in --- # $Id: Makefile.in,v 1.1 2004/06/13 01:14:31 friedl Exp $ # Makefile for sqlite # NEEDLIBS=-lsqlite SMETHOD=sqlite CFLAGS=@CFLAGS@ CC=@CC@ DEFS=@DEFS@ all: libstor$(SMETHOD).a libstor$(SMETHOD).a: sqlite.o ar -crus libstor$(SMETHOD).a $? %.o: %.c ../../config.h ../../ipac.h $(CC) -c -I. -I../.. $(DEFS) $(CFLAGS) $< -o $@ clean: rm -f *.a *.o distclean: clean rm -f Makefile *~ --- NEW FILE: sqlite.c --- /* * * $Id: sqlite.c,v 1.1 2004/06/13 01:14:31 friedl Exp $ * * sqlite backend to ipac-ng Copyright (C) 2001-2004 Al Zakharov, 2004 Simon Hausmann * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 675 Mass * Ave, Cambridge, MA 02139, USA. * */ #include "ipac.h" #include "../sharedsql/sharedsql.h" #include <sqlite.h> static sqlite *conn; static char **res; static int nRows; static int nCols; static char *err; static int sqlite_stor_open (int flag); static void sql_close_connection (); static const storage_method_t interface_entry = { "sqlite", sqlite_stor_open, sql_stor_store_record, sql_stor_list_timestamps, sql_stor_get_records, sql_stor_get_summary, sql_stor_delete_record, sql_stor_close }; const storage_method_t *ipac_sm_interface_sqlite () { return &interface_entry; } static int sqlite_stor_open (int flag); static int sql_execute_query (const char *query); static int sql_execute_simple_query (const char *query); static void sql_clear_result(); static int sql_number_of_affected_rows (); static const char *sql_result_get_value (int row, int column); static void sql_close_connection(); /* include shared sql routines */ #include "../sharedsql/sharedsql.c" static int sqlite_stor_open (int flag) { sql_stor_open(); conn = sqlite_open (dbname, 0, &err); if (err) { fprintf (stderr, "Connection to database '%s' failed.\n", dbname); fprintf (stderr, "%s", err); sqlite_freemem (err); return 1; } if (sqlite_get_table (conn, "select name from sqlite_master where type='table' and name='logs'", &res, &nRows, &nCols, 0) != SQLITE_OK || nRows != 1) { fprintf (stderr, "ipac-ng[sqlite]: creating logs table\n"); sqlite_exec (conn, "CREATE TABLE logs (" "rule_name character varying(64) NOT NULL," "bytes bigint NOT NULL," "pkts bigint NOT NULL," "that_time bigint NOT NULL," "hostname character varying(256)" ")", 0, 0, 0); } storage_opened = 1; return 0; } static int sql_execute_query (const char *query) { DPRINTF ("%s\n", query); int resultCode = sqlite_get_table (conn, query, &res, &nRows, &nCols, &err); if (resultCode != SQLITE_OK) { fprintf (stderr, "%s : SQL command (%s) failed\nError: %s\n", me, query, err); DPRINTF ("failed: %s\n", err); sql_clear_result(); return -1; } return 0; } static int sql_execute_simple_query (const char *query) 
{ DPRINTF ("%s\n", query); if (sqlite_exec (conn, query, 0, 0, 0) != SQLITE_OK) { DPRINTF ("failed: %s\n", err); return -1; } return 0; } static void sql_clear_result() { sqlite_free_table (res); } static int sql_number_of_affected_rows () { return nRows; } static const char *sql_result_get_value (int row, int column) { /* + 1 because the first row contains the headers */ return res[((row + 1) * nCols) + column]; } static void sql_close_connection() { sqlite_close (conn); } |
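The sqlite backend above registers itself through a table of function pointers (interface_entry) that the core calls instead of naming any backend directly. The self-contained sketch below shows that dispatch pattern with a dummy backend; the struct here is trimmed to two operations and is an assumption for illustration, while the real storage_method_t in ipac.h also carries store_record, list_timestamps, get_records, get_summary and delete_record.

/* Toy illustration of the function-pointer dispatch used by interface_entry.
 * Field set and the dummy backend are assumptions, not the ipac.h definition. */
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *name;
    int  (*open)(int flag);
    void (*close)(void);
} storage_method_t;

static int  dummy_open(int flag) { printf("open(flag=%d)\n", flag); return 0; }
static void dummy_close(void)    { printf("close()\n"); }

static const storage_method_t dummy_entry = { "dummy", dummy_open, dummy_close };

/* pick a backend by its name field, as given by the configured storage method */
static const storage_method_t *find_storage(const storage_method_t **all,
                                            size_t n, const char *wanted)
{
    for (size_t i = 0; i < n; i++)
        if (strcmp(all[i]->name, wanted) == 0)
            return all[i];
    return NULL;
}

int main(void)
{
    const storage_method_t *backends[] = { &dummy_entry };
    const storage_method_t *sm = find_storage(backends, 1, "dummy");
    if (sm && sm->open(0) == 0)   /* all further calls go through the table */
        sm->close();
    return 0;
}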
From: Friedrich L. <fr...@us...> - 2004-06-13 01:14:41
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/sharedsql In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21070/storage/sharedsql Added Files: sharedsql.c sharedsql.h Log Message: factor out common code for all sql backends added support for the sqlite database backend --- NEW FILE: sharedsql.h --- #ifndef SHAREDSQL_H #define SHAREDSQL_H #ifdef DEBUG_DB #define DEBUG_DB_LOGFILE "dblog" static int fd; static void debuglog(const char *format, ...); #define DPRINTF(fmt, args...) debuglog(fmt, ##args) #else #define DPRINTF(fmt, args...) #endif static void sql_stor_open (); static int sql_stor_store_record (const data_record_type *data); static int sql_stor_list_timestamps (timestamp_t start, timestamp_t end, timestamp_t **data, timestamp_t *just_before, timestamp_t *just_after, char *ahost); static int sql_stor_get_records (timestamp_t timestamp_b, timestamp_t timestamp_e, data_record_type **data, char *filter); static int sql_stor_get_summary (timestamp_t timestamp_b, timestamp_t timestamp_e, data_record_type **data, char *filter); static int sql_stor_delete_record (timestamp_t timestamp); static void sql_stor_clear (); static void sql_stor_close (); #endif /* SHAREDSQL_H */ --- NEW FILE: sharedsql.c --- /* * * $Id: sharedsql.c,v 1.1 2004/06/13 01:14:30 friedl Exp $ * * generic functions equal for all sql backends * Copyright (C) 2001-2003 Al Zakharov, 2003-2004 Friedrich Lobenstock, 2004 Simon Hausmann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <stdio.h> #include <string.h> #include <dirent.h> #include <errno.h> #include <stdlib.h> #include <sys/param.h> #include <unistd.h> #include <time.h> #include <sys/types.h> #include <unistd.h> #include <stdarg.h> #include <fcntl.h> #include <sys/stat.h> extern char *ahost; /* the statical variables used for returning results */ static data_record_type *_timestamp_lst = NULL; static int _timestamp_max = 0; static rule_type *_rules_lst = NULL; static int _rules_max = 0; #ifdef DEBUG_DB static int fd = 0; #endif /* a safety margin of 0.5 percent for memory allocations */ #define MEMORY_SAFETY_MARGIN(x) (int)(x * 0.005 + 0.5) static void sql_stor_open () { #ifdef DEBUG_DB char *log_file; // construct the name of the logfile log_file = xmalloc(strlen(datadir) + sizeof(DEBUG_DB_LOGFILE) + 2); sprintf(log_file, "%s/" DEBUG_DB_LOGFILE, datadir); // open the logfile for appending fd = open (log_file, O_CREAT|O_WRONLY|O_APPEND|O_SYNC); #endif } static int sql_stor_store_record (const data_record_type *data) { rule_type *rule, *firstrule; char wh_exec[380]; firstrule = data->firstrule; if (firstrule == NULL) return 0; /* DPRINTF ("BEGIN\n"); if (sql_execute_simple_query ("BEGIN")) return 1; */ for (rule = firstrule; rule; rule = rule->next) { sprintf (wh_exec, "INSERT INTO logs (rule_name, bytes, pkts, that_time, hostname) " "VALUES ('%s', '%llu', '%llu', '%lu', '%s')", rule->name, rule->bytes, rule->pkts, data->timestamp, hostname); DPRINTF ("sql_stor_store_record\n"); /* FIXME: we don't report back error's for now actually we accept that single entries might fail, eg. they are double, but that's why we have uniq indices :-) */ sql_execute_simple_query (wh_exec); } /* DPRINTF ("COMMIT\n"); return sql_execute_simple_query ("COMMIT"); */ return 0; } static int sql_stor_list_timestamps (timestamp_t start, timestamp_t end, timestamp_t **data, timestamp_t *just_before, timestamp_t *just_after, char *ahost) { int i; timestamp_t *ts_list = NULL; int ts_list_len = 0; char wh_exec[250]; DPRINTF ("sql_stor_list_timestamps (%lu, %lu, **data, %s, %s, %s)\n", start, end, (just_before!=NULL) ? "*just_before" : "NULL", (just_after!=NULL) ? "*just_after" : "NULL", (ahost!=NULL) ? 
ahost : "NULL"); sprintf (wh_exec, "SELECT distinct that_time FROM logs WHERE "); if (ahost) sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); sprintf (wh_exec+strlen (wh_exec), "that_time between %lu and %lu " "ORDER BY that_time ASC", start, end); if (sql_execute_query (wh_exec)) { return -1; } ts_list_len = sql_number_of_affected_rows (); if (ts_list_len > 0) { ts_list = xmalloc (ts_list_len * sizeof (timestamp_t)); for (i = 0; i < ts_list_len; i++) ts_list[i] = strtoul (sql_result_get_value (i, 0), NULL, 10); sql_clear_result(); if (just_before != NULL) { sprintf (wh_exec, "SELECT that_time FROM logs WHERE " "that_time < %lu ORDER BY that_time DESC LIMIT 1", start); if (sql_execute_query (wh_exec)) { free (ts_list); return -1; } if (sql_number_of_affected_rows () > 0) *just_before = strtoul (sql_result_get_value (0, 0), NULL, 10); else *just_before = (timestamp_t) -1 ; sql_clear_result (); } if (just_after != NULL) { sprintf (wh_exec, "SELECT that_time FROM logs WHERE " "that_time > %lu ORDER BY that_time ASC LIMIT 1", end); if (sql_execute_query (wh_exec)) { free (ts_list); return -1; } if (sql_number_of_affected_rows () > 0) *just_after = strtoul (sql_result_get_value (0, 0), NULL, 10); else *just_after = (timestamp_t) -1; sql_clear_result(); } *data = ts_list; } DPRINTF ("sql_stor_list_timestamps returning: %i\n", ts_list_len); return ts_list_len; } static int sql_stor_get_records (timestamp_t timestamp_b, timestamp_t timestamp_e, data_record_type **data, char *filter) { rule_type *r, *r1 = NULL, *rules; int i, nr_timestamps, index, nr_rules; timestamp_t timestamp_akt; char wh_exec[320]; /* char *tmp; */ DPRINTF ("sql_stor_get_records (%lu, %lu, **data, %s)\n", timestamp_b, timestamp_e, (filter!=NULL) ? filter : "NULL"); if (timestamp_e) { sprintf (wh_exec, "SELECT COUNT(*) FROM (SELECT DISTINCT that_time FROM logs WHERE "); if (ahost) sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); sprintf (wh_exec+strlen (wh_exec), "that_time between '%lu' and '%lu'", timestamp_b, timestamp_e); if (filter) sprintf (wh_exec+strlen (wh_exec), " AND rule_name like '%%%s%%'", filter); strcat (wh_exec, ")"); if (sql_execute_query (wh_exec)) { sql_close_connection (); exit (-1); } nr_timestamps = strtol (sql_result_get_value (0,0), NULL, 10); DPRINTF ("result: %u timestamps\n", nr_timestamps); sql_clear_result(); } else { nr_timestamps = 1; DPRINTF ("searching for exactly one timestamp: %u\n", timestamp_b); } sprintf (wh_exec, "SELECT rule_name, bytes, pkts, that_time, hostname FROM logs WHERE "); if (ahost) sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); if (timestamp_e) { sprintf (wh_exec+strlen (wh_exec), "that_time between '%lu' and '%lu'", timestamp_b, timestamp_e); } else { sprintf (wh_exec+strlen (wh_exec), "that_time = '%lu'", timestamp_b); } if (filter) sprintf (wh_exec+strlen (wh_exec), " AND rule_name like '%%%s%%'", filter); if (timestamp_e) sprintf (wh_exec+strlen (wh_exec), " ORDER BY that_time"); if (sql_execute_query (wh_exec)) { sql_close_connection (); exit (-1); } nr_rules = sql_number_of_affected_rows (); DPRINTF ("result: %u data records\n", nr_rules); DPRINTF ("starting to convert data into our format\n"); /* create record_data_type. 
*/ if (nr_timestamps > _timestamp_max) { /* need to increase size of data_record_type array */ if (_timestamp_lst != NULL) { /* as realloc does not support a count parameter we free the old list first and then call calloc again */ DPRINTF ("freeing memory of old timestamp list of size %i\n", _timestamp_max); free (_timestamp_lst); } /* always add a safety margin */ _timestamp_max = nr_timestamps + MEMORY_SAFETY_MARGIN(nr_timestamps); DPRINTF ("allocating memory for timestamp list with %i elements\n", _timestamp_max); DPRINTF (" calloc (cnt %i, size %u) = %lu bytes\n", _timestamp_max, sizeof (data_record_type), (long)((long)_timestamp_max * (long)sizeof (data_record_type))); _timestamp_lst = (data_record_type *)calloc (_timestamp_max, sizeof (data_record_type)); if (_timestamp_lst == NULL) { fprintf (stderr,"%s: data_record_type calloc(cnt %i, size %u) failed: %s\n", me, _timestamp_max, sizeof (data_record_type), strerror(errno)); DPRINTF ("failed: %s\n", strerror(errno)); sql_clear_result (); sql_close_connection (); exit (-1); } } *data = _timestamp_lst; /* create rule_type. */ if (nr_rules > _rules_max) { /* need to increase size of rule_type array */ if (_rules_lst != NULL) { /* as realloc does not support a count parameter we free the old list first and then call calloc again */ DPRINTF ("freeing memory of old timestamp list of size %i\n", _rules_max); free (_rules_lst); } /* always add a safety margin */ _rules_max = nr_rules + MEMORY_SAFETY_MARGIN(nr_rules); DPRINTF ("allocating memory for rule list with %i elements\n", _rules_max); DPRINTF (" calloc (cnt %i, size %u) = %lu bytes\n", _rules_max, sizeof (rule_type), (long)((long)_rules_max * (long)sizeof (rule_type))); _rules_lst = (rule_type *)calloc (_rules_max, sizeof (rule_type)); if (_rules_lst == NULL) { fprintf (stderr,"%s: rule_type calloc(cnt %i, size %u) failed: %s\n", me, _rules_max, sizeof (rule_type), strerror(errno)); DPRINTF ("failed: %s\n", strerror(errno)); sql_clear_result (); sql_close_connection (); exit (-1); } } rules = _rules_lst; index = -1; timestamp_akt = 0; for (i = 0; i < nr_rules; i++) { // currently timestamp_t is of type time_t which is essentially long timestamp_t tstamp_new = strtol ((char *) sql_result_get_value (i,3), NULL, 10); if (tstamp_new != timestamp_akt) { // do we have a new timestamp? timestamp_akt = tstamp_new; index++; if (index > nr_timestamps) { fprintf (stderr,"%s: We got more records than timestamps " "were reported before. This should not happen!\n", me); DPRINTF ("We got more records than timestamps " "were reported before. This should not happen!\n"); sql_clear_result (); sql_close_connection (); exit (-1); } (*data)[index].timestamp = timestamp_akt; /* FIXME: Two records with the same 'that_time' field but different hostname fields get listed as being from the hostname of the first record. Therefore it makes no sense and is plain wrong to store the hostname in this query. 
*/ /* tmp = (char *)PQgetvalue (res, i, 4); (*data)[index].machine_name = calloc(1, strlen(tmp)+1); if ((*data)[index].machine_name == NULL) { fprintf (stderr,"%s: calloc(1, size %u) for string \"%s\" failed: %s\n", me, strlen(tmp)+1, tmp, strerror(errno)); DPRINTF ("calloc(1, size %u) for string \"%s\" failed: %s\n", strlen(tmp)+1, tmp, strerror(errno)); sql_clear_result (); sql_close_connection (); exit (-1); } memcpy((*data)[index].machine_name, tmp, strlen(tmp)+1); */ (*data)[index].machine_name = NULL; (*data)[index].firstrule = NULL; r1 = NULL; } r = &rules[i]; r->next = NULL; if (r1 == NULL) (*data)[index].firstrule = r; else r1->next = r; r1 = r; // never copy more than MAX_RULE_NAME_LENGTH+1 bytes from the resulting rule name strncpy (r->name, (char *) sql_result_get_value (i, 0), MAX_RULE_NAME_LENGTH+1); // make sure it's a null terminated string r->name[MAX_RULE_NAME_LENGTH] = '\0'; r->bytes = strtoull (sql_result_get_value (i, 1), NULL, 10); r->pkts = strtoull (sql_result_get_value (i, 2), NULL, 10); #ifdef DEBUG_DB_LEVEL2 DPRINTF ("Record: %s, bytes %llu, pkts %llu\n", r->name, r->bytes, r->pkts); #endif } sql_clear_result (); DPRINTF ("data conversion finished\n"); DPRINTF ("sql_stor_get_records returning: %i\n", index+1); return index+1; } static int sql_stor_get_summary (timestamp_t timestamp_b, timestamp_t timestamp_e, data_record_type **data, char *filter) { rule_type *r, *r1 = NULL; int i, nr_timestamps = 0, index; timestamp_t timestamp_akt; char wh_exec[320]; DPRINTF ("sql_stor_get_summary (%lu, %lu, **data, %s)\n", timestamp_b, timestamp_e, (filter!=NULL) ? filter : "NULL"); if (!timestamp_e) timestamp_e = timestamp_b; if (ahost) sprintf (wh_exec, "SELECT rule_name, sum (bytes), sum (pkts), hostname FROM logs " "WHERE " "hostname = '%s' and that_time between '%lu' and '%lu' ", ahost, timestamp_b, timestamp_e); else sprintf (wh_exec, "SELECT rule_name, sum (bytes), sum (pkts), hostname FROM logs " "WHERE " "that_time between '%lu' and '%lu' ", timestamp_b, timestamp_e); if (filter) sprintf (wh_exec+strlen (wh_exec), " and rule_name like '%%%s%%'", filter); sprintf (wh_exec+strlen (wh_exec), " group by rule_name, hostname"); if (sql_execute_query (wh_exec)) { sql_close_connection (); exit (-1); } DPRINTF ("starting to convert data into our format\n"); /* create record_data_type. */ *data = (data_record_type *)xmalloc (sizeof (data_record_type)); index = -1; timestamp_akt = 0; for (i = 0; i < sql_number_of_affected_rows (); i++) { timestamp_t tstamp_new = timestamp_b; if (tstamp_new != timestamp_akt) { // do we have a new timestamp? timestamp_akt = tstamp_new; index++; if (index > nr_timestamps) { fprintf (stderr,"We got more records then timestamps " "were reported before. This should not happen\n"); DPRINTF ("We got more records then timestamps " "were reported before. This should not happen\n"); sql_clear_result (); sql_close_connection (); exit (-1); } (*data)[index].timestamp = timestamp_akt; /* FIXME: Two records with the same 'that_time' field but different hostname fields get listed as being from the hostname of the first record. Therefore it makes no sense and is plain wrong to store the hostname in this query. 
*/ /* tmp = (char *)PQgetvalue (res, i, 4); (*data)[index].machine_name = calloc(1, strlen(tmp)+1); if ((*data)[index].machine_name == NULL) { fprintf (stderr,"%s: calloc(1, size %u) for string \"%s\" failed: %s\n", me, strlen(tmp)+1, tmp, strerror(errno)); DPRINTF ("calloc(1, size %u) for string \"%s\" failed: %s\n", strlen(tmp)+1, tmp, strerror(errno)); sql_clear_result (); sql_close_connection (); exit (-1); } memcpy((*data)[index].machine_name, tmp, strlen(tmp)+1); */ (*data)[index].machine_name = NULL; (*data)[index].firstrule = NULL; r1 = NULL; } r = new_rule (); if (r1 == NULL) (*data)[index].firstrule = r; else r1->next = r; r1 = r; // never copy more than MAX_RULE_NAME_LENGTH+1 bytes from the resulting rule name strncpy (r->name, sql_result_get_value (i, 0), MAX_RULE_NAME_LENGTH+1); // make sure it's a null terminated string r->name[MAX_RULE_NAME_LENGTH] = '\0'; r->bytes = strtoull (sql_result_get_value (i, 1), NULL, 10); r->pkts = strtoull (sql_result_get_value (i, 2), NULL, 10); } sql_clear_result (); DPRINTF ("data conversion finished\n"); DPRINTF ("sql_stor_get_summary returning: %i\n", index+1); return index+1; } static int sql_stor_delete_record (timestamp_t timestamp) { char wh_exec[120]; sprintf (wh_exec, "DELETE FROM logs WHERE that_time = '%lu'", timestamp); DPRINTF ("sql_stor_delete_record\n"); return sql_execute_simple_query (wh_exec); } /* free data we kept in memory */ static void sql_stor_clear () { DPRINTF ("sql_stor_clear() \n"); if (_timestamp_lst != NULL) { DPRINTF ("freeing data_record_type array with %i elements\n", _timestamp_max); free_data_record_type_array (_timestamp_lst, _timestamp_max); _timestamp_lst = NULL; _timestamp_max = 0; } if (_rules_lst != NULL) { DPRINTF ("freeing rule_type array with %i elements\n", _rules_max); free (_rules_lst); _rules_lst = NULL; _rules_max = 0; } DPRINTF ("sql_stor_clear finished\n"); } static void sql_stor_close () { DPRINTF ("sql_stor_close()\n"); /* clean up first */ sql_stor_clear (); sql_close_connection (); storage_opened = 0; DPRINTF ("sql_stor_close finished\n"); #ifdef DEBUG_DB /* close logfile */ if (fd != -1) { close (fd); } #endif } #ifdef DEBUG_DB static void debuglog (const char *format, ...) { va_list arg; time_t tm; char logline[512]; // construct the date string like syslog does, eg. Sep 26 00:23:04 tm = time(NULL); strftime(logline, sizeof(logline)-1, "%b %e %H:%M:%S ", localtime(&tm)); snprintf (logline+strlen(logline), sizeof(logline)-strlen(logline)-1, "fetchipac[%i]: ", getpid()); va_start (arg, format); vsnprintf(logline+strlen(logline), sizeof(logline)-strlen(logline)-1, format, arg); va_end (arg); // write the log message to the logfile if (fd != -1) { write (fd, logline, strlen(logline)); } } #endif |
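sharedsql.c above holds the SQL logic that is identical for every backend; each backend (sqlite.c, postgre.c) supplies a small set of static primitives (sql_execute_query, sql_execute_simple_query, sql_clear_result, sql_number_of_affected_rows, sql_result_get_value, sql_close_connection) and then textually #includes sharedsql.c. The toy program below collapses that split into a single file to show the idea; the fake driver and the query in it are made up for illustration and are not project code.

/* Single-file stand-in for the sharedsql pattern: generic code is written
 * against forward-declared static primitives, and the "backend" part of the
 * same translation unit supplies them (in ipac-ng the generic half is pulled
 * in via #include "../sharedsql/sharedsql.c"). */
#include <stdio.h>

/* primitives every backend must provide (a subset of the real list) */
static int         sql_execute_query(const char *query);
static int         sql_number_of_affected_rows(void);
static const char *sql_result_get_value(int row, int column);
static void        sql_clear_result(void);

/* "shared" half: knows the SQL, not the client library */
static void shared_dump_rule_names(void)
{
    if (sql_execute_query("SELECT rule_name FROM logs") != 0)
        return;
    for (int i = 0; i < sql_number_of_affected_rows(); i++)
        printf("rule: %s\n", sql_result_get_value(i, 0));
    sql_clear_result();
}

/* "backend" half: a fake driver standing in for sqlite or libpq */
static const char *fake_rows[] = { "incoming", "outgoing" };
static int fake_nrows;

static int sql_execute_query(const char *query)
{ printf("executing: %s\n", query); fake_nrows = 2; return 0; }
static int sql_number_of_affected_rows(void) { return fake_nrows; }
static const char *sql_result_get_value(int row, int column)
{ (void)column; return fake_rows[row]; }
static void sql_clear_result(void) { fake_nrows = 0; }

int main(void) { shared_dump_rule_names(); return 0; }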
From: Friedrich L. <fr...@us...> - 2004-06-13 01:14:41
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/postgre In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21070/storage/postgre Modified Files: postgre.c Log Message: factor out common code for all sql backends added support for the sqlite database backend Index: postgre.c =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/storage/postgre/postgre.c,v retrieving revision 1.30 retrieving revision 1.31 diff -u -d -r1.30 -r1.31 --- postgre.c 9 May 2004 23:13:40 -0000 1.30 +++ postgre.c 13 Jun 2004 01:14:30 -0000 1.31 @@ -3,7 +3,7 @@ * $Id$ * * postgresql backend to ipac-ng - * Copyright (C) 2001-2003 Al Zakharov + * Copyright (C) 2001-2003 Al Zakharov, 2003-2004 Friedrich Lobenstock, 2004 Simon Hausman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,88 +25,41 @@ */ #include "ipac.h" -#include <stdio.h> -#include <string.h> -#include <dirent.h> -#include <errno.h> -#include <stdlib.h> -#include <sys/param.h> -#include <unistd.h> +#include "postgre.h" +#include "../sharedsql/sharedsql.h" #include <libpq-fe.h> -#include <time.h> -#include <sys/types.h> -#include <unistd.h> -#include <stdarg.h> -#include <fcntl.h> -#include <sys/stat.h> - -extern char *ahost; static char *pgoptions = NULL; static char *pgtty = NULL; -#ifdef DEBUG_DB -#define DEBUG_DB_LOGFILE "dblog" -static int fd; -void debuglog(const char *format, ...); -#define DPRINTF(fmt, args...) debuglog(fmt, ##args) -#else -#define DPRINTF(fmt, args...) -#endif - static PGconn *conn; static PGresult *res; -/* the statical variables used for returning results */ -static data_record_type *_timestamp_lst = NULL; -static int _timestamp_max = 0; -static rule_type *_rules_lst = NULL; -static int _rules_max = 0; - -/* a safety margin of 0.5 percent for memory allocations */ -#define MEMORY_SAFETY_MARGIN(x) (int)(x * 0.005 + 0.5) +static int postgre_stor_open (int flag); -/* plain file ipac interface entries */ -int postgre_stor_open (int flag); -int postgre_stor_store_record (const data_record_type *data); -int postgre_stor_list_timestamps (timestamp_t start, timestamp_t end, - timestamp_t **data, timestamp_t *just_before, - timestamp_t *just_after, char *ahost); -int postgre_stor_get_records (timestamp_t timestamp_b, timestamp_t timestamp_e, - data_record_type **data, char *filter); -int postgre_stor_get_summary (timestamp_t timestamp_b, timestamp_t timestamp_e, - data_record_type **data, char *filter); -int postgre_stor_delete_record (timestamp_t timestamp); -void postgre_stor_close (); -void postgre_stor_clear (); +static void sql_close_connection (); static const storage_method_t interface_entry = { "postgre", postgre_stor_open, - postgre_stor_store_record, - postgre_stor_list_timestamps, - postgre_stor_get_records, - postgre_stor_get_summary, - postgre_stor_delete_record, - postgre_stor_close + sql_stor_store_record, + sql_stor_list_timestamps, + sql_stor_get_records, + sql_stor_get_summary, + sql_stor_delete_record, + sql_stor_close }; const storage_method_t *ipac_sm_interface_postgre () { return &interface_entry; } -int postgre_stor_open (int flag) -{ -#ifdef DEBUG_DB - char *log_file; - - // construct the name of the logfile - log_file = xmalloc(strlen(datadir) + sizeof(DEBUG_DB_LOGFILE) + 2); - sprintf(log_file, "%s/" DEBUG_DB_LOGFILE, datadir); +/* include shared sql routines */ +#include "../sharedsql/sharedsql.c" - // open the logfile for appending - fd = open (log_file, 
O_CREAT|O_WRONLY|O_APPEND|O_SYNC); -#endif +static int postgre_stor_open (int flag) +{ + sql_stor_open(); conn = PQsetdbLogin (dbhost, dbport, pgoptions, pgtty, dbname, dbuser, dbpass); @@ -117,506 +70,54 @@ DPRINTF ("failed postgre_stor_open\n"); PQfinish (conn); - return (1); + return -1; } DPRINTF ("postgre_stor_open\n"); storage_opened = 1; - return (0); -} - -int postgre_stor_store_record (const data_record_type *data) -{ - rule_type *rule, *firstrule; - char wh_exec[380]; - - firstrule = data->firstrule; - - if (firstrule == NULL) - return 0; - - for (rule = firstrule; rule; rule = rule->next) { - sprintf (wh_exec, "INSERT INTO logs (rule_name, bytes, pkts, that_time, hostname) " - "VALUES ('%s', '%llu', '%llu', '%lu', '%s')", - rule->name, rule->bytes, rule->pkts, data->timestamp, hostname); - - DPRINTF ("postgre_stor_store_record\n"); - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_COMMAND_OK) { - fprintf (stderr, "%s : %s\n", me, PQresultErrorMessage (res)); - } - PQclear (res); - } return 0; } -int postgre_stor_list_timestamps (timestamp_t start, timestamp_t end, - timestamp_t **data, timestamp_t *just_before, - timestamp_t *just_after, char *ahost) +static int sql_execute_query (const char *query) { - int i; - timestamp_t *ts_list = NULL; - int ts_list_len = 0; - char wh_exec[250]; - - DPRINTF ("postgre_stor_list_timestamps (%lu, %lu, **data, %s, %s, %s)\n", - start, end, (just_before!=NULL) ? "*just_before" : "NULL", - (just_after!=NULL) ? "*just_after" : "NULL", (ahost!=NULL) ? ahost : "NULL"); - - sprintf (wh_exec, "SELECT distinct that_time FROM logs WHERE "); - if (ahost) - sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); - sprintf (wh_exec+strlen (wh_exec), "that_time between %lu and %lu " - "ORDER BY that_time ASC", start, end); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "%s: %s\n", me, PQresultErrorMessage (res)); - - DPRINTF ("%s: %s\n", me, PQresultErrorMessage (res)); - - return (-1); - } - ts_list_len = PQntuples (res); - - if (ts_list_len > 0) { - ts_list = xmalloc (PQntuples (res) * sizeof (timestamp_t)); - for (i = 0; i < ts_list_len; i++) - ts_list[i] = strtoul ((char *) PQgetvalue (res, i, 0), NULL, 0); - PQclear (res); - - if (just_before != NULL) { - sprintf (wh_exec, "SELECT that_time FROM logs WHERE " - "that_time < %lu ORDER BY that_time DESC LIMIT 1", start); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "%s: %s\n", me, PQresultErrorMessage (res)); - - DPRINTF ("%s: %s\n", me, PQresultErrorMessage (res)); - - if (ts_list) free (ts_list); - return (-1); - } - if (PQntuples (res) > 0) - *just_before = strtoul ((char *) PQgetvalue (res, 0, 0), NULL, 0); - else - *just_before = (timestamp_t) -1 ; - PQclear (res); - } - - if (just_after != NULL) { - sprintf (wh_exec, "SELECT that_time FROM logs WHERE " - "that_time > %lu ORDER BY that_time ASC LIMIT 1", end); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "%s: %s\n", me, PQresultErrorMessage (res)); - - DPRINTF ("%s: %s\n", me, PQresultErrorMessage (res)); - - if (ts_list) free (ts_list); - return (-1); - } - if (PQntuples (res) > 0) - *just_after = strtoul ((char *) PQgetvalue (res, 0, 0), NULL, 0); - else - *just_after = (timestamp_t) -1; - PQclear 
(res); - } - - *data = ts_list; - + DPRINTF ("%s\n", query); + res = PQexec (conn, query); + if (!res || (PQresultStatus (res) != PGRES_COMMAND_OK && PQresultStatus (res) != PGRES_TUPLES_OK)) { + fprintf (stderr, "%s : SQL command (%s) failed\nError: %s\n", me, query, PQresultErrorMessage (res)); + DPRINTF ("failed: %s\n", PQresultErrorMessage (res)); + sql_clear_result(); + return -1; } - - DPRINTF ("postgre_stor_list_timestamps returning: %i\n", ts_list_len); - return ts_list_len; + return 0; } -int postgre_stor_get_records (timestamp_t timestamp_b, timestamp_t timestamp_e, - data_record_type **data, char *filter) +static int sql_execute_simple_query (const char *query) { - rule_type *r, *r1 = NULL, *rules; - int i, nr_timestamps, index, nr_rules; - timestamp_t timestamp_akt; - char wh_exec[320]; - /* char *tmp; */ - - DPRINTF ("postgre_stor_get_records (%lu, %lu, **data, %s)\n", - timestamp_b, timestamp_e, (filter!=NULL) ? filter : "NULL"); - - if (timestamp_e) { - sprintf (wh_exec, "SELECT count (distinct that_time) FROM logs WHERE "); - if (ahost) - sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); - sprintf (wh_exec+strlen (wh_exec), "that_time between '%lu' and '%lu'", - timestamp_b, timestamp_e); - if (filter) - sprintf (wh_exec+strlen (wh_exec), " AND rule_name like '%%%s%%'", - filter); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "%s: PQexec of (%s) command didn't return tuples" - " properly\nlibpq says: %s", me, wh_exec, - PQresultErrorMessage (res)); - - DPRINTF ("%s\n", PQresultErrorMessage (res)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - nr_timestamps = strtol (PQgetvalue (res,0,0),0,0); - DPRINTF ("result: %u timestamps\n", nr_timestamps); - PQclear (res); - } else { - nr_timestamps = 1; - DPRINTF ("searching for exactly one timestamp: %u\n", timestamp_b); - } - - sprintf (wh_exec, "SELECT rule_name, bytes, pkts, that_time, hostname FROM logs WHERE "); - if (ahost) - sprintf (wh_exec+strlen (wh_exec), "hostname = '%s' AND ", ahost); - if (timestamp_e) { - sprintf (wh_exec+strlen (wh_exec), "that_time between '%lu' and '%lu'", - timestamp_b, timestamp_e); - } else { - sprintf (wh_exec+strlen (wh_exec), "that_time = '%lu'", timestamp_b); - } - if (filter) - sprintf (wh_exec+strlen (wh_exec), " AND rule_name like '%%%s%%'", filter); - if (timestamp_e) - sprintf (wh_exec+strlen (wh_exec), " ORDER BY that_time"); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "%s: PQexec of (%s) command didn't return tuples" - " properly\nlibpq says: %s", me, wh_exec, - PQresultErrorMessage (res)); - - DPRINTF ("%s\n", PQresultErrorMessage (res)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - nr_rules = PQntuples (res); - DPRINTF ("result: %u data records\n", nr_rules); - - DPRINTF ("starting to convert data into our format\n"); - - /* create record_data_type. 
*/ - if (nr_timestamps > _timestamp_max) { - /* need to increase size of data_record_type array */ - - if (_timestamp_lst != NULL) { - /* as realloc does not support a count parameter we free the old list first - and then call calloc again */ - DPRINTF ("freeing memory of old timestamp list of size %i\n", _timestamp_max); - free (_timestamp_lst); - } - - /* always add a safety margin */ - _timestamp_max = nr_timestamps + MEMORY_SAFETY_MARGIN(nr_timestamps); - - DPRINTF ("allocating memory for timestamp list with %i elements\n", _timestamp_max); - DPRINTF (" calloc (cnt %i, size %u) = %lu bytes\n", _timestamp_max, sizeof (data_record_type), (long)((long)_timestamp_max * (long)sizeof (data_record_type))); - _timestamp_lst = (data_record_type *)calloc (_timestamp_max, sizeof (data_record_type)); - - if (_timestamp_lst == NULL) { - fprintf (stderr,"%s: data_record_type calloc(cnt %i, size %u) failed: %s\n", me, _timestamp_max, sizeof (data_record_type), strerror(errno)); - - DPRINTF ("failed: %s\n", strerror(errno)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - } - *data = _timestamp_lst; - - /* create rule_type. */ - if (nr_rules > _rules_max) { - /* need to increase size of rule_type array */ - - if (_rules_lst != NULL) { - /* as realloc does not support a count parameter we free the old list first - and then call calloc again */ - DPRINTF ("freeing memory of old timestamp list of size %i\n", _rules_max); - free (_rules_lst); - } - - /* always add a safety margin */ - _rules_max = nr_rules + MEMORY_SAFETY_MARGIN(nr_rules); - - DPRINTF ("allocating memory for rule list with %i elements\n", _rules_max); - DPRINTF (" calloc (cnt %i, size %u) = %lu bytes\n", _rules_max, sizeof (rule_type), (long)((long)_rules_max * (long)sizeof (rule_type))); - _rules_lst = (rule_type *)calloc (_rules_max, sizeof (rule_type)); + int resultCode; - if (_rules_lst == NULL) { - fprintf (stderr,"%s: rule_type calloc(cnt %i, size %u) failed: %s\n", me, _rules_max, sizeof (rule_type), strerror(errno)); - - DPRINTF ("failed: %s\n", strerror(errno)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - } - rules = _rules_lst; - - index = -1; - timestamp_akt = 0; - for (i = 0; i < nr_rules; i++) { - // currently timestamp_t is of type time_t which is essentially long - timestamp_t tstamp_new = strtol ((char *) PQgetvalue (res,i,3), NULL, 10); - if (tstamp_new != timestamp_akt) { // do we have a new timestamp? - timestamp_akt = tstamp_new; - index++; - if (index > nr_timestamps) { - fprintf (stderr,"%s: We got more records than timestamps " - "were reported before. This should not happen!\n", me); - - DPRINTF ("We got more records than timestamps " - "were reported before. This should not happen!\n"); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - (*data)[index].timestamp = timestamp_akt; - /* FIXME: Two records with the same 'that_time' field but different - hostname fields get listed as being from the hostname of - the first record. Therefore it makes no sense and is plain - wrong to store the hostname in this query. 
*/ - - /* tmp = (char *)PQgetvalue (res, i, 4); - (*data)[index].machine_name = calloc(1, strlen(tmp)+1); - - if ((*data)[index].machine_name == NULL) { - fprintf (stderr,"%s: calloc(1, size %u) for string \"%s\" failed: %s\n", me, strlen(tmp)+1, tmp, strerror(errno)); - - DPRINTF ("calloc(1, size %u) for string \"%s\" failed: %s\n", strlen(tmp)+1, tmp, strerror(errno)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - memcpy((*data)[index].machine_name, tmp, strlen(tmp)+1); */ - (*data)[index].machine_name = NULL; - - (*data)[index].firstrule = NULL; - r1 = NULL; - } - - r = &rules[i]; - r->next = NULL; - - if (r1 == NULL) - (*data)[index].firstrule = r; - else - r1->next = r; - r1 = r; - - // never copy more than MAX_RULE_NAME_LENGTH+1 bytes from the resulting rule name - strncpy (r->name, (char *) PQgetvalue (res, i, 0), MAX_RULE_NAME_LENGTH+1); - // make sure it's a null terminated string - r->name[MAX_RULE_NAME_LENGTH] = '\0'; - r->bytes = strtoull ((char *) PQgetvalue (res, i, 1), NULL, 10); - r->pkts = strtoull ((char *) PQgetvalue (res, i, 2), NULL, 10); - -#ifdef DEBUG_DB_LEVEL2 - DPRINTF ("Record: %s, bytes %llu, pkts %llu\n", r->name, r->bytes, r->pkts); -#endif - } - PQclear (res); - - DPRINTF ("data conversion finished\n"); - DPRINTF ("postgre_stor_get_records returning: %i\n", index+1); - return index+1; + resultCode = sql_execute_query (query); + if (resultCode >= 0) + sql_clear_result(); + return resultCode; } -int postgre_stor_get_summary (timestamp_t timestamp_b, timestamp_t timestamp_e, - data_record_type **data, char *filter) +static void sql_clear_result() { - rule_type *r, *r1 = NULL; - int i, nr_timestamps = 0, index; - timestamp_t timestamp_akt; - char wh_exec[320]; - - DPRINTF ("postgre_stor_get_summary (%lu, %lu, **data, %s)\n", - timestamp_b, timestamp_e, (filter!=NULL) ? filter : "NULL"); - - if (!timestamp_e) - timestamp_e = timestamp_b; - - if (ahost) - sprintf (wh_exec, "SELECT rule_name, sum (bytes), sum (pkts), hostname FROM logs " - "WHERE " - "hostname = '%s' and that_time between '%lu' and '%lu' ", - ahost, timestamp_b, timestamp_e); - else - sprintf (wh_exec, "SELECT rule_name, sum (bytes), sum (pkts), hostname FROM logs " - "WHERE " - "that_time between '%lu' and '%lu' ", - timestamp_b, timestamp_e); - if (filter) - sprintf (wh_exec+strlen (wh_exec), " and rule_name like '%%%s%%'", filter); - - sprintf (wh_exec+strlen (wh_exec), " group by rule_name, hostname"); - - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - if (!res || PQresultStatus (res) != PGRES_TUPLES_OK) { - fprintf (stderr, "PQexec of (%s) command didn't return tuples" - " properly\nlibpq says: %s", wh_exec, - PQresultErrorMessage (res)); - - DPRINTF ("%s: %s\n", me, PQresultErrorMessage (res)); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - - DPRINTF ("starting to convert data into our format\n"); - - /* create record_data_type. */ - *data = (data_record_type *)xmalloc (sizeof (data_record_type)); - - index = -1; - timestamp_akt = 0; - for (i = 0; i < PQntuples (res); i++) { - timestamp_t tstamp_new = timestamp_b; - if (tstamp_new != timestamp_akt) { // do we have a new timestamp? - timestamp_akt = tstamp_new; - index++; - if (index > nr_timestamps) { - fprintf (stderr,"We got more records then timestamps " - "were reported before. This should not happen\n"); - - DPRINTF ("We got more records then timestamps " - "were reported before. 
This should not happen\n"); - - PQclear (res); - PQfinish(conn); - exit (-1); - } - (*data)[index].timestamp = timestamp_akt; - (*data)[index].machine_name = xstrdup ((char *) PQgetvalue (res, i, 3)); - (*data)[index].firstrule = NULL; - r1 = NULL; - } - r = new_rule (); - if (r1 == NULL) - (*data)[index].firstrule = r; - else - r1->next = r; - r1 = r; - - // never copy more than MAX_RULE_NAME_LENGTH+1 bytes from the resulting rule name - strncpy (r->name, (char *) PQgetvalue (res, i, 0), MAX_RULE_NAME_LENGTH+1); - // make sure it's a null terminated string - r->name[MAX_RULE_NAME_LENGTH] = '\0'; - r->bytes = strtoull ((char *) PQgetvalue (res, i, 1), NULL, 10); - r->pkts = strtoull ((char *) PQgetvalue (res, i, 2), NULL, 10); - } PQclear (res); - - DPRINTF ("data conversion finished\n"); - DPRINTF ("postgre_stor_get_summary returning: %i\n", index+1); - return index+1; } -int postgre_stor_delete_record (timestamp_t timestamp) +static int sql_number_of_affected_rows () { - char wh_exec[120]; - - sprintf (wh_exec, "DELETE FROM logs WHERE that_time = '%lu'", timestamp); - - DPRINTF ("postgre_stor_delete_record\n"); - DPRINTF ("%s\n", wh_exec); - - res = PQexec (conn, wh_exec); - PQclear (res); - return 0; + return PQntuples (res); } -void postgre_stor_close () +static const char *sql_result_get_value (int row, int column) { - DPRINTF ("postgre_stor_close()\n"); - - /* clean up first */ - postgre_stor_clear (); - - PQfinish (conn); - storage_opened = 0; - DPRINTF ("postgre_stor_close finished\n"); - -#ifdef DEBUG_DB - /* close logfile */ - if (fd != -1) { - close (fd); - } -#endif - + return PQgetvalue (res, row, column); } -/* free data we kept in memory */ -void postgre_stor_clear () +static void sql_close_connection() { - DPRINTF ("postgre_stor_clear() \n"); - - if (_timestamp_lst != NULL) { - DPRINTF ("freeing data_record_type array with %i elements\n", _timestamp_max); - free_data_record_type_array (_timestamp_lst, _timestamp_max); - _timestamp_lst = NULL; - _timestamp_max = 0; - } - - if (_rules_lst != NULL) { - DPRINTF ("freeing rule_type array with %i elements\n", _rules_max); - free (_rules_lst); - _rules_lst = NULL; - _rules_max = 0; - } - DPRINTF ("postgre_stor_clear finished\n"); + PQfinish (conn); } -#ifdef DEBUG_DB -void debuglog (const char *format, ...) -{ - va_list arg; - time_t tm; - char logline[512]; - - // construct the date string like syslog does, eg. Sep 26 00:23:04 - tm = time(NULL); - strftime(logline, sizeof(logline)-1, "%b %e %H:%M:%S ", localtime(&tm)); - - snprintf (logline+strlen(logline), sizeof(logline)-strlen(logline)-1, "fetchipac[%i]: ", getpid()); - - va_start (arg, format); - vsnprintf(logline+strlen(logline), sizeof(logline)-strlen(logline)-1, format, arg); - va_end (arg); - - // write the log message to the logfile - if (fd != -1) { - write (fd, logline, strlen(logline)); - } -} -#endif |
From: Friedrich L. <fr...@us...> - 2004-06-13 01:14:40
|
Update of /cvsroot/ipac-ng/ipac-ng/storage
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21070/storage

Modified Files:
        Makefile.in
Log Message:
factor out common code for all sql backends
added support for the sqlite database backend

Index: Makefile.in
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/storage/Makefile.in,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- Makefile.in	18 Apr 2004 21:02:37 -0000	1.4
+++ Makefile.in	13 Jun 2004 01:14:29 -0000	1.5
@@ -47,7 +47,7 @@
 clean: clean-recursive
 distclean: clean distclean-recursive
-	rm -f Makefile *~ *.orig
+	rm -f Makefile *~ *.orig sharedsql/*~ sharedsql/*.orig
 maintainerclean: distclean maintainerclean-recursive
From: Friedrich L. <fr...@us...> - 2004-06-13 01:14:38
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21070 Modified Files: CHANGELOG configure configure.in fetchipac.c Log Message: factor out common code for all sql backends added support for the sqlite database backend Index: CHANGELOG =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/CHANGELOG,v retrieving revision 1.20 retrieving revision 1.21 diff -u -d -r1.20 -r1.21 --- CHANGELOG 16 May 2004 23:48:21 -0000 1.20 +++ CHANGELOG 13 Jun 2004 01:14:28 -0000 1.21 @@ -9,14 +9,18 @@ # no name means most likely that Al Zakharov did it # # current developers: -# kaiser13/az ......... Al Zakharov <kai...@us...> -# friedl/fl ........... Friedrich Lobenstock <fl...@fl...> +# kaiser13/az ......... Al Zakharov <kaiser13 at users dot sourceforge dot net> +# friedl/fl ........... Friedrich Lobenstock <fl at fl dot priv dot at> # # contributors: -# Thomas Zehetbauer <th...@ho...> -# +# Thomas Zehetbauer <thomasz at hostmaster dot org> +# Simon Hausman <simon at lst dot de> # +1.29 +- added support for the sqlite database backend (friedl/Simon Hausman) +- factor out common code for all sql backends (friedl + Simon Hausman) + 1.28 - known problem(s): * with ipactest script which does report errors Index: fetchipac.c =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/fetchipac.c,v retrieving revision 1.17 retrieving revision 1.18 diff -u -d -r1.17 -r1.18 --- fetchipac.c 9 May 2004 23:13:40 -0000 1.17 +++ fetchipac.c 13 Jun 2004 01:14:28 -0000 1.18 @@ -453,8 +453,11 @@ print_records(stdout, n, dr); } - /* free memory */ - if (strcmp(storage_method->name, "postgre") && (n>0)) + /* free memory + FIXME: in the future all backends should clear it's data at store_close() time */ + if (strcmp(storage_method->name, "postgre") && strcmp(storage_method->name, "sqlite") && (n>0)) + /* only for storage backends that don't already do free their own data + we have to do it now */ free_data_record_type_array(dr, n); } Index: configure =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/configure,v retrieving revision 1.10 retrieving revision 1.11 diff -u -d -r1.10 -r1.11 --- configure 17 May 2004 19:42:45 -0000 1.10 +++ configure 13 Jun 2004 01:14:28 -0000 1.11 @@ -2872,6 +2872,66 @@ fi +echo "$as_me:$LINENO: checking for sqlite_get_table in -lsqlite" >&5 +echo $ECHO_N "checking for sqlite_get_table in -lsqlite... $ECHO_C" >&6 +if test "${ac_cv_lib_sqlite_sqlite_get_table+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsqlite $LIBS" +cat >conftest.$ac_ext <<_ACEOF +#line $LINENO "configure" +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char sqlite_get_table (); +int +main () +{ +sqlite_get_table (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_sqlite_sqlite_get_table=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_sqlite_sqlite_get_table=no +fi +rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_sqlite_sqlite_get_table" >&5 +echo "${ECHO_T}$ac_cv_lib_sqlite_sqlite_get_table" >&6 +if test $ac_cv_lib_sqlite_sqlite_get_table = yes; then + HAVE_LIBSQLITE=yes +else + HAVE_LIBSQLITE=no +fi + LIBS="$LIBS -L. -lipac" postgresql_inc=-I/usr/include/pgsql @@ -3517,6 +3577,158 @@ LDFLAGS="$LDFLAGS $postgresql_lib -lpq" fi + +for ac_header in sqlite.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +#line $LINENO "configure" +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +#line $LINENO "configure" +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc in + yes:no ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" 
>&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to bug...@gn.... ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; + no:yes ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------ ## +## Report this to bug...@gn.... ## +## ------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + HAVE_SQLITE_H=yes +else + HAVE_SQLITE_H=no +fi + +done + + +HAVE_SQLITE=no +if test $HAVE_LIBSQLITE = yes; then + if test $HAVE_SQLITE_H = yes; then + HAVE_SQLITE=yes + fi +fi + echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6 if test "${ac_cv_c_const+set}" = set; then @@ -3736,8 +3948,9 @@ echo "$as_me:$LINENO: checking for available storage methods" >&5 echo $ECHO_N "checking for available storage methods... $ECHO_C" >&6 -STORAGEMETHODS=`(cd storage && find . -type d -print) | - sed -e 's#^\./##' | egrep -v '\.|CVS' | tr '\n' ' '` +STORAGEMETHODS=`(cd storage && find . -type d -not -name "sharedsql" \ + -not -name "CVS" -not -name "." 
-print) | \ + sed -e 's#^\./##' | tr '\n' ' '` storagemethod_makefiles="" STORAGEMETHODLIBRARYS="" @@ -3762,6 +3975,16 @@ continue fi fi + + if test $sm = sqlite; then + if test $HAVE_SQLITE = no; then + { echo "$as_me:$LINENO: WARNING: Sqlite library not found, omitting sqlite storage" >&5 +echo "$as_me: WARNING: Sqlite library not found, omitting sqlite storage" >&2;} + STORAGEMETHODS=`echo $STORAGEMETHODS|sed -e 's/sqlite \?//'` + continue + fi + fi + storagemethod_makefiles="$storagemethod_makefiles storage/$sm/Makefile" test -f "storage/$sm/configure" && \ configure_dirs="$configure_dirs storage/$sm" Index: configure.in =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/configure.in,v retrieving revision 1.10 retrieving revision 1.11 diff -u -d -r1.10 -r1.11 --- configure.in 18 Apr 2004 20:26:10 -0000 1.10 +++ configure.in 13 Jun 2004 01:14:28 -0000 1.11 @@ -62,6 +62,7 @@ AC_CHECK_LIB(pq,PQexec, HAVE_LIBPQ=yes) AC_CHECK_LIB(gdbm,gdbm_open, HAVE_LIBGDBM=yes) AC_CHECK_LIB(dl, dlopen) +AC_CHECK_LIB(sqlite, sqlite_get_table, [ HAVE_LIBSQLITE=yes ], [HAVE_LIBSQLITE=no]) LIBS="$LIBS -L. -lipac" postgresql_inc=-I/usr/include/pgsql @@ -88,6 +89,15 @@ LDFLAGS="$LDFLAGS $postgresql_lib -lpq" fi +AC_CHECK_HEADERS(sqlite.h, [HAVE_SQLITE_H=yes], [HAVE_SQLITE_H=no]) + +HAVE_SQLITE=no +if test $HAVE_LIBSQLITE = yes; then + if test $HAVE_SQLITE_H = yes; then + HAVE_SQLITE=yes + fi +fi + dnl Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_TYPE_SIZE_T @@ -145,8 +155,9 @@ dnl ----- decisions about storage backends dnl ----------------------------------- AC_MSG_CHECKING([for available storage methods]) -STORAGEMETHODS=`(cd storage && find . -type d -print) | - sed -e 's#^\./##' | egrep -v '\.|CVS' | tr '\n' ' '` +STORAGEMETHODS=`(cd storage && find . -type d -not -name "sharedsql" \ + -not -name "CVS" -not -name "." -print) | \ + sed -e 's#^\./##' | tr '\n' ' '` AC_SUBST(STORAGEMETHODS) storagemethod_makefiles="" STORAGEMETHODLIBRARYS="" @@ -171,6 +182,16 @@ continue fi fi + + dnl test if we are trying to compile with sqlite and if the library is there + if test $sm = sqlite; then + if test $HAVE_SQLITE = no; then + AC_MSG_WARN([Sqlite library not found, omitting sqlite storage]) + STORAGEMETHODS=`echo $STORAGEMETHODS|sed -e 's/sqlite \?//'` + continue + fi + fi + storagemethod_makefiles="$storagemethod_makefiles storage/$sm/Makefile" test -f "storage/$sm/configure" && \ configure_dirs="$configure_dirs storage/$sm" |
From: Friedrich L. <fr...@us...> - 2004-06-13 01:04:34
|
Update of /cvsroot/ipac-ng/ipac-ng
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv11548

Modified Files:
        ipacsum fetchipac.8 ipac-convert.8 ipacsum.8
Log Message:
increase version to 1.29

Index: ipac-convert.8
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/ipac-convert.8,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- ipac-convert.8	27 Sep 2003 11:27:33 -0000	1.4
+++ ipac-convert.8	13 Jun 2004 01:04:17 -0000	1.5
@@ -76,7 +76,7 @@
 If the source database is corrupted, results are undefined.
 .SH VERSION
 .\" =()<This man page belongs to ipac version @<VERSION>@.>()=
-This man page belongs to ipac version 1.28.
+This man page belongs to ipac version 1.29.
 For updates and other information, look at
 .B http://sourceforge.net/projects/ipac-ng

Index: ipacsum
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/ipacsum,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -d -r1.11 -r1.12
--- ipacsum	18 Apr 2004 20:26:10 -0000	1.11
+++ ipacsum	13 Jun 2004 01:04:06 -0000	1.12
@@ -52,7 +52,7 @@
 # =()<$datdelim="@<DATDELIM>@";>()=
 $datdelim="#-#-#-#-#";
 # =()<$version="@<VERSION>@";>()=
-$version="1.28";
+$version="1.29";
 # =()<$prefix="@<prefix>@";>()=
 $prefix="/usr";
 # =()<$exec_prefix="@<exec_prefix>@";>()=

Index: fetchipac.8
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/fetchipac.8,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- fetchipac.8	27 Sep 2003 11:27:33 -0000	1.4
+++ fetchipac.8	13 Jun 2004 01:04:17 -0000	1.5
@@ -64,7 +64,7 @@
 .IR methods .
 The storage methods provided are:
 .\" =()<.I @<STORAGEMETHODS>@.>()=
-.I gdbm plain-file postgre .
+.I gdbm plain-file postgre.
 The default storage method used is
 .\" =()<.IR @<DEFAULT_STORAGE>@.>()=
 .IR gdbm.
@@ -339,7 +339,7 @@
 to use ipchains with 2.4.* kernels!)
 .SH VERSION
 .\" =()<This man page belongs to ipac-ng version @<VERSION>@.>()=
-This man page belongs to ipac-ng version 1.28.
+This man page belongs to ipac-ng version 1.29.
 For updates and other information, look at
 .B http://sf.net/projects/ipac-ng

Index: ipacsum.8
===================================================================
RCS file: /cvsroot/ipac-ng/ipac-ng/ipacsum.8,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- ipacsum.8	27 Sep 2003 11:27:33 -0000	1.4
+++ ipacsum.8	13 Jun 2004 01:04:17 -0000	1.5
@@ -414,7 +414,7 @@
 very well and the output is ugly. Use --png instead.
 .SH VERSION
 .\" =()<This man page belongs to ipac version @<VERSION>@.>()=
-This man page belongs to ipac version 1.28.
+This man page belongs to ipac version 1.29.
 For updates and other information, look at
 .B http://sourceforge.net/projects/ipac-ng
From: Friedrich L. <fr...@us...> - 2004-06-13 00:55:36
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/sqlite In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv3695/sqlite Log Message: Directory /cvsroot/ipac-ng/ipac-ng/storage/sqlite added to the repository |
From: Friedrich L. <fr...@us...> - 2004-06-13 00:55:18
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/sharedsql In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv3385/sharedsql Log Message: Directory /cvsroot/ipac-ng/ipac-ng/storage/sharedsql added to the repository |
From: Friedrich L. <fr...@us...> - 2004-05-27 13:15:54
|
Update of /cvsroot/ipac-ng/ipac-ng/contrib/suse In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv27201 Removed Files: HOWTO-postgres-db-on-SuSE-8.0 Log Message: moved to doc/ --- HOWTO-postgres-db-on-SuSE-8.0 DELETED --- |
From: Friedrich L. <fr...@us...> - 2004-05-17 19:47:05
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv14278 Modified Files: .ignore Log Message: The .ignore file should of course also be ignored while creating a release Index: .ignore =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/.ignore,v retrieving revision 1.1 retrieving revision 1.2 diff -u -d -r1.1 -r1.2 --- .ignore 17 May 2004 19:37:36 -0000 1.1 +++ .ignore 17 May 2004 19:46:40 -0000 1.2 @@ -5,6 +5,7 @@ *.d *~ .deps +.ignore ipac-ng stamp-h config.cache |
From: Friedrich L. <fr...@us...> - 2004-05-17 19:42:56
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv13481 Modified Files: configure Log Message: forgot to run "autoconf" Index: configure =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/configure,v retrieving revision 1.9 retrieving revision 1.10 diff -u -d -r1.9 -r1.10 --- configure 11 Apr 2004 22:46:36 -0000 1.9 +++ configure 17 May 2004 19:42:45 -0000 1.10 @@ -4053,7 +4053,14 @@ fi if test $DEFAULT_AGENT = iptables; then - if test -d /usr/lib/iptables; then + # test for the 64 bit libs first (18.04.2004 - Thomas Zehetbauer <th...@ho...>) + if test -d /usr/lib64/iptables; then + IPT_LIB_DIR=/usr/lib64/iptables + elif test -d /lib64/iptables; then + IPT_LIB_DIR=/lib64/iptables + elif test -d /usr/local/lib64/iptables; then + IPT_LIB_DIR=/usr/local/lib64/iptables + elif test -d /usr/lib/iptables; then IPT_LIB_DIR=/usr/lib/iptables elif test -d /lib/iptables; then IPT_LIB_DIR=/lib/iptables |
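A quick way to see which candidate directory configure will now pick on a given box, mirroring the lib64-first search order from this patch (purely illustrative; the real configure script may check further directories after the ones shown here):

    for d in /usr/lib64/iptables /lib64/iptables /usr/local/lib64/iptables \
             /usr/lib/iptables /lib/iptables; do
        test -d "$d" && { echo "configure would pick IPT_LIB_DIR=$d"; break; }
    done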
From: Friedrich L. <fr...@us...> - 2004-05-17 19:37:44
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv12268 Added Files: .ignore Log Message: add ignore file for releasing --- NEW FILE: .ignore --- *.org *.orig *.rej *.spec *.d *~ .deps ipac-ng stamp-h config.cache config.h config.log config.status DEADJOE CVS out |
From: Friedrich L. <fr...@us...> - 2004-05-16 23:48:30
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv26074 Modified Files: CHANGELOG Log Message: add section "known problems" to changelog Index: CHANGELOG =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/CHANGELOG,v retrieving revision 1.19 retrieving revision 1.20 diff -u -d -r1.19 -r1.20 --- CHANGELOG 9 May 2004 23:45:01 -0000 1.19 +++ CHANGELOG 16 May 2004 23:48:21 -0000 1.20 @@ -18,6 +18,11 @@ # 1.28 +- known problem(s): + * the ipactest script reports errors + even though everything is working ok + * duplicate tickmarks can appear in generated images; this + is caused by rounding - no fix yet - fixed the unspooling problem that crept in during 1.28 (friedl) - non classic mode removed - make ipac-ng compile on 64 bit, also checked gdbm storage (friedl/Thomas Zehetbauer) |
From: Friedrich L. <fr...@us...> - 2004-05-09 23:45:10
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv6666 Modified Files: CHANGELOG Log Message: document the fix of the unspooling problem Index: CHANGELOG =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/CHANGELOG,v retrieving revision 1.18 retrieving revision 1.19 diff -u -d -r1.18 -r1.19 --- CHANGELOG 9 May 2004 15:13:04 -0000 1.18 +++ CHANGELOG 9 May 2004 23:45:01 -0000 1.19 @@ -18,6 +18,7 @@ # 1.28 +- fixed the unspooling problem that crept in during 1.28 (friedl) - non classic mode removed - make ipac-ng compile on 64 bit, also checked gdbm storage (friedl/Thomas Zehetbauer) - put some documentation together in the doc/ subdirectory (friedl) |
From: Friedrich L. <fr...@us...> - 2004-05-09 23:41:44
|
Update of /cvsroot/ipac-ng/ipac-ng/doc In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv6063 Modified Files: HOWTO-postgres-db postgres-db.sql Added Files: ipac.conf.sample rules.conf.sample Log Message: documentation update --- NEW FILE: rules.conf.sample --- # Example config file with accounting rules # Install as /etc/ipac-ng/rules.conf.iptables # # Format: # Name of rule|direction|interface|protocol|source|destination|extension| # WARNING!!!! spaces are not allowed before or after '|'. # # where # Name of rule Any string to identify this rule # direction ipac~fi - forward in # ipac~fo - forward out # ipac~i - outgoing from machine with ipac-ng to other host(/net) # (or incoming to otherhost) # ipac~o - incoming to machine with ipac-ng # (or outgoing from otherhost) # # interface interface name, '+' means all interfaces (don't try to use IP numbers here!) # protocol tcp | udp | icmp | all # source \ # destination both as described in ipfwadm(8), or empty # # # W A R N I N G ! ! ! # # Don't use symbols other than '0-9A-z[space]' in rule names. You may encounter # some strange trouble. Incoming Total System|ipac~o|eth0|all|||| Incoming Total System|ipac~fi|eth0|all|||| Outgoing Total System|ipac~i|eth0|all|||| Outgoing Total System|ipac~fo|eth0|all|||| Index: postgres-db.sql =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/postgres-db.sql,v retrieving revision 1.1 retrieving revision 1.2 diff -u -d -r1.1 -r1.2 --- postgres-db.sql 11 Apr 2004 20:50:56 -0000 1.1 +++ postgres-db.sql 9 May 2004 23:41:35 -0000 1.2 @@ -1,4 +1,17 @@ +-- the drop table has been commented out to protect the innocent ;-) +-- DROP TABLE logs; + + +-- +-- Please increase the size of the hostname field if the output of +-- +-- hostname | wc -c +-- +-- returns a number greater than 16. +-- + + CREATE TABLE "logs" ( "rule_name" character varying(32) NOT NULL, "bytes" bigint NOT NULL, --- NEW FILE: ipac.conf.sample --- # This is the main ipac-ng configuration file. It contains the # configuration directives that give ipac-ng its instructions. ## accounting agent. iptables and ipchains are available now. account agent = iptables ## storage. gdbm, postgre and files are supported. (files is not recommended) storage = postgre ## rules file rules file = /etc/ipac-ng/rules.conf # don't store lines containing only zeroes, to speed up processing and to save space drop zero lines = yes ## These parameters control the database location ## leave 'db host' and 'db port' blank for a local database ## as of now, both databases (access and storage) are configured by these parameters #db host = localhost #db port = 5432 db name = ipac db user = ipac db pass = "XXXXXXXX" Index: HOWTO-postgres-db =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/HOWTO-postgres-db,v retrieving revision 1.2 retrieving revision 1.3 diff -u -d -r1.2 -r1.3 --- HOWTO-postgres-db 8 May 2004 13:20:02 -0000 1.2 +++ HOWTO-postgres-db 9 May 2004 23:41:35 -0000 1.3 @@ -19,7 +19,7 @@ to be done to get the ipac database initialized on a freshly installed postgres database server on SuSE 8.0. -1) check if you have all postgres packages installed: (SuSE specific!) +1) Check if you have all postgres packages installed: (SuSE specific!) # rpm -qa | sort | grep postgresql postgresql-7.2-90 postgresql-contrib-7.2-90 @@ -54,7 +54,7 @@ username "postgres". This will ensure that the database socket in /tmp is not removed. 
-2) start the database server the first time +2) Start the database server for the first time # rcpostgresql start Initializing the PostgreSQL database at \ location /var/lib/pgsql/data done @@ -67,20 +67,22 @@ Remark: rcpostgresql is probably /etc/init.d/postgresql or similar on non-SuSE systems -3) as user postgres create a user "ipac" which will later be used for +3) As user postgres create a user "ipac" which will later be used for ipac-ng's fetchipac # createuser -U postgres -P -E -D -A ipac Enter password for user "ipac": xxxxxxxx Enter it again: xxxxxxxx CREATE USER -4) as user postgres create the table "ipac" which will later be used for +4) As user postgres create the database "ipac" which will later be used for ipac-ng # createdb -U postgres ipac CREATE DATABASE -5) now we fill this database with tables and grant the users their specific - rights +5) First edit the file postgres-db.sql with your preferred editor and adapt + the hostname field according to the instructions at the top of that + file. Then we fill our database ipac with tables and grant the users + their specific rights # psql -U postgres ipac < postgres-db.sql @@ -91,7 +93,7 @@ Note: Don't do this if you have already created the tables and there's data in them, because this step will definitely destroy all previous data! -6) Now we assign passwords to the database admin user postgres +6) Now we assign a password to the database admin user postgres # psql -U postgres ipac Welcome to psql, the PostgreSQL interactive terminal. @@ -122,7 +124,7 @@ Note: before you start editing do a "su postgres" -8) now the database server needs to be stopped and started again +8) Now the database server needs to be stopped and started again # rcpostgresql stop Shutting down PostgreSQL done @@ -134,10 +136,25 @@ Remark: rcpostgresql is probably /etc/init.d/postgresql or similar on non-SuSE systems -9) Now you can start configuring iapc-ng +9) Now you can start configuring and installing ipac-ng - ./configure \ - --enable-default-storage=postgre \ - --with-fetch-dbase-login=ipac \ - --with-fetch-dbase-pass=XXXXXXXX (replace with your own password) + ./configure --enable-default-storage=postgre # optionally add "--prefix=/usr" + make + make install + +10) ipac-ng needs to have the config files ipac.conf and rules.conf + installed in /etc/ipac-ng. In rules.conf you can then add your + accounting rules + + mkdir /etc/ipac-ng + cp ipac.conf.sample /etc/ipac-ng/ipac.conf + cp rules.conf.sample /etc/ipac-ng/rules.conf + + Note: You need to edit /etc/ipac-ng/ipac.conf and set the password we + defined for the user ipac in step 3 + +11) In case the database server cannot be reached ipac-ng needs a spool + directory where it saves the data in a file named "spool" + + mkdir /var/lib/ipac |
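To round off the HOWTO steps, a short sketch of adding one host-specific accounting rule and taking a first snapshot. The rule line follows the field layout documented in rules.conf.sample above; the interface, the 192.168.1.10 address and the rule name are placeholders, and running fetchipac without arguments to record one data set is an assumption - see fetchipac(8) for the exact invocation:

    # append an illustrative per-host rule (adjust direction/interface to your topology)
    echo 'Mail traffic to mailhost|ipac~fi|eth0|tcp||192.168.1.10/32||' >> /etc/ipac-ng/rules.conf

    fetchipac        # store one accounting record (normally run from cron)
    ipacsum          # print a human-readable summary; see ipacsum(8) for options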
From: Friedrich L. <fr...@us...> - 2004-05-09 23:17:06
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv1351 Modified Files: TODO Log Message: update TODO Index: TODO =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/TODO,v retrieving revision 1.4 retrieving revision 1.5 diff -u -d -r1.4 -r1.5 --- TODO 11 Apr 2004 21:33:45 -0000 1.4 +++ TODO 9 May 2004 23:16:56 -0000 1.5 @@ -1,17 +1,9 @@ # $Id$ -Things to do for ipac-ng 2.0 +Things to do for ipac-ng 2.0 and beyond: - complete rewrite :( -+ configuration through config file, not just at compile time - -- integrate passwords database and password management for users and admin - -- complete support for email (smtp\pop\imap) - -- implement support for squid traffic accounting (no-no-no!) - - implement support for iproute2 (maybe) (not now :) - update documentation (partially done) @@ -31,3 +23,8 @@ - create documentation document with plain-file storage method description +Things to do for ipac-ng 1.29: + +- add support for SQLite database backend +- add support for MySQL database backend + |
From: Friedrich L. <fr...@us...> - 2004-05-09 23:13:50
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv32135 Modified Files: fetchipac.c Log Message: fix problem with unspooling data from spool file make remark about hostname "problem" in storage/postgre/postgre.c more clear Index: fetchipac.c =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/fetchipac.c,v retrieving revision 1.16 retrieving revision 1.17 diff -u -d -r1.16 -r1.17 --- fetchipac.c 19 Nov 2003 03:55:12 -0000 1.16 +++ fetchipac.c 9 May 2004 23:13:40 -0000 1.17 @@ -485,6 +485,7 @@ int spool_record(const data_record_type *dr) { FILE *f; + char *ahost_saved; int mof; int ret; @@ -497,6 +498,10 @@ mof = machine_output_format; machine_output_format = 1; + /* we have to use the ahost - see storage/postgre for details why + the hostname in the rules itself is ignored */ + ahost_saved = ahost; + ahost = dr->machine_name; ret = 0; if (print_records(f, 1, dr) != 0) { fprintf(stderr, "%s: cant write data record to spool file \"" @@ -506,6 +511,7 @@ } fputs("\n", f); fclose(f); + ahost = ahost_saved; machine_output_format = mof; return ret; } |
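Some context for this fix: when the database backend is unreachable, fetchipac appends the records in its machine output format to the spool file, and they are unspooled into the database on a later successful run. A read-only way to check whether anything is currently waiting (the /var/lib/ipac/spool path follows the spool directory introduced in the HOWTO; adjust it if your data directory differs):

    test -s /var/lib/ipac/spool && head -n 5 /var/lib/ipac/spool \
        || echo "spool file empty or absent - nothing waiting to be unspooled"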
From: Friedrich L. <fr...@us...> - 2004-05-09 23:13:49
|
Update of /cvsroot/ipac-ng/ipac-ng/storage/postgre In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv32135/storage/postgre Modified Files: postgre.c Log Message: fix problem with unspooling data from spool file make remark about hostname "problem" in storage/postgre/postgre.c more clear Index: postgre.c =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/storage/postgre/postgre.c,v retrieving revision 1.29 retrieving revision 1.30 diff -u -d -r1.29 -r1.30 --- postgre.c 2 Feb 2004 22:19:35 -0000 1.29 +++ postgre.c 9 May 2004 23:13:40 -0000 1.30 @@ -398,10 +398,12 @@ exit (-1); } (*data)[index].timestamp = timestamp_akt; - /* FIXME: storing only the first machine name found is crap - so there's no need to store the machine name anyway + /* FIXME: Two records with the same 'that_time' field but different + hostname fields get listed as being from the hostname of + the first record. Therefore it makes no sense and is plain + wrong to store the hostname in this query. */ - tmp = (char *)PQgetvalue (res, i, 4); + /* tmp = (char *)PQgetvalue (res, i, 4); (*data)[index].machine_name = calloc(1, strlen(tmp)+1); if ((*data)[index].machine_name == NULL) { |
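A way to check whether the ambiguity described in the FIXME actually occurs in a given installation, i.e. whether any 'that_time' value in the logs table carries records from more than one host. Sketched for the postgres backend with the table and column names used above; the ipac user and database names are the ones from the HOWTO:

    psql -U ipac -d ipac -c \
      "SELECT that_time, count(DISTINCT hostname) AS hosts
         FROM logs
        GROUP BY that_time
       HAVING count(DISTINCT hostname) > 1;"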
From: Friedrich L. <fr...@us...> - 2004-05-09 15:13:12
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv408 Modified Files: CHANGELOG Log Message: document that non classic mode is now (hopefully) completely removed Index: CHANGELOG =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/CHANGELOG,v retrieving revision 1.17 retrieving revision 1.18 diff -u -d -r1.17 -r1.18 --- CHANGELOG 18 Apr 2004 21:19:08 -0000 1.17 +++ CHANGELOG 9 May 2004 15:13:04 -0000 1.18 @@ -18,11 +18,11 @@ # 1.28 +- non classic mode removed - make ipac-ng compile on 64 bit, also checked gdbm storage (friedl/Thomas Zehetbauer) - put some documentation together in the doc/ subdirectory (friedl) - the never documented webinterface is gone now (kaiser13, friedl) - reduce worst case memory usage with huge database of >= 4GB to about 1.2GB (friedl) -- start to remove classic mode - logging of database debug messages improved (friedl) - add option '--enable-debug-database' to configure (friedl) - add option '--human-kilo' to ipacsum, the default now is 1K=1024 (friedl) |
From: Friedrich L. <fr...@us...> - 2004-05-09 02:46:41
|
Update of /cvsroot/ipac-ng/ipac-ng/doc In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv5585 Modified Files: tips-and-tricks Log Message: corrections Index: tips-and-tricks =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/tips-and-tricks,v retrieving revision 1.3 retrieving revision 1.4 diff -u -d -r1.3 -r1.4 --- tips-and-tricks 9 May 2004 02:42:15 -0000 1.3 +++ tips-and-tricks 9 May 2004 02:46:32 -0000 1.4 @@ -7,7 +7,10 @@ ./configure --prefix=${PWD%/*} --sbindir='${exec_prefix}'/${PWD##*/} --with-confdir=${PWD}/ipac-ng mkdir ipac-ng - Now you put the config files in ./ipac-ng not in /etc/ipac-ng. + Don't forget to put the config files in ./ipac-ng not in /etc/ipac-ng! + + You can now run fetchipac and ipacsum by calling them via + "./fetchipac" and "./ipacsum" respectively. ------------------------------------------------------------------------------- @@ -19,7 +22,8 @@ ./configure --prefix=${PWD%/*} --sbindir='${exec_prefix}'/${PWD##*/} - Now you can run by calling them via "./fetchipac" or "./ipacsum". + You can now run fetchipac and ipacsum by calling them via + "./fetchipac" and "./ipacsum" respectively. Note: This is also a good way of debugging fetchipac because the binary will not be "stripped" this way. |
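The parameter expansions in this configure call look cryptic but simply split the current directory into its parent and its last component. A pure-shell illustration with a hypothetical checkout path:

    cd /home/user/src/ipac-ng-1.29
    echo "${PWD%/*}"      # -> /home/user/src   (becomes --prefix)
    echo "${PWD##*/}"     # -> ipac-ng-1.29     (appended to exec_prefix for --sbindir)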
From: Friedrich L. <fr...@us...> - 2004-05-09 02:42:24
|
Update of /cvsroot/ipac-ng/ipac-ng/doc In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv5059 Modified Files: tips-and-tricks Log Message: updated tips Index: tips-and-tricks =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/tips-and-tricks,v retrieving revision 1.2 retrieving revision 1.3 diff -u -d -r1.2 -r1.3 --- tips-and-tricks 8 May 2004 13:20:02 -0000 1.2 +++ tips-and-tricks 9 May 2004 02:42:15 -0000 1.3 @@ -9,3 +9,17 @@ Now you put the config files in ./ipac-ng not in /etc/ipac-ng. +------------------------------------------------------------------------------- + +Q: How can I test a new version of ipacsum without overwriting the currently + installed version? Meaning I want to run it from the source directory but + want to use the configuration files of the currently installed version. + +A: Run the following configure command (add other options if needed): + + ./configure --prefix=${PWD%/*} --sbindir='${exec_prefix}'/${PWD##*/} + + Now you can run by calling them via "./fetchipac" or "./ipacsum". + + Note: This is also a good way of debugging fetchipac because the binary + will not be "stripped" this way. |
From: Friedrich L. <fr...@us...> - 2004-05-08 13:23:10
|
Update of /cvsroot/ipac-ng/ipac-ng In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv29258 Modified Files: conffile.y Log Message: stop at the first error found so as to break the endless loop Index: conffile.y =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/conffile.y,v retrieving revision 1.3 retrieving revision 1.4 diff -u -d -r1.3 -r1.4 --- conffile.y 14 Nov 2003 19:51:29 -0000 1.3 +++ conffile.y 8 May 2004 13:23:01 -0000 1.4 @@ -233,5 +233,5 @@ conferror(char *s) { fprintf(stderr, "Config parse error near line %d: %s\n", confline, s); - return 0; + exit (1); } |
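The practical effect, sketched (assumes an /etc/ipac-ng/ipac.conf that contains a line the parser cannot handle):

    fetchipac; echo "exit status: $?"
    # prints a single "Config parse error near line ..." message and exits with
    # status 1, instead of getting stuck in the endless loop the log message refers to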
From: Friedrich L. <fr...@us...> - 2004-05-08 13:20:12
|
Update of /cvsroot/ipac-ng/ipac-ng/doc In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv28363 Modified Files: HOWTO-postgres-db tips-and-tricks Log Message: documentation cleanup and update Index: tips-and-tricks =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/tips-and-tricks,v retrieving revision 1.1 retrieving revision 1.2 diff -u -d -r1.1 -r1.2 --- tips-and-tricks 8 Nov 2003 00:10:29 -0000 1.1 +++ tips-and-tricks 8 May 2004 13:20:02 -0000 1.2 @@ -4,5 +4,8 @@ A: Run the following configure command (add other options if needed): - ./configure --prefix=${PWD%/*} --sbindir='${exec_prefix}'/${PWD##*/} + ./configure --prefix=${PWD%/*} --sbindir='${exec_prefix}'/${PWD##*/} --with-confdir=${PWD}/ipac-ng + mkdir ipac-ng + + Now you put the config files in ./ipac-ng not in /etc/ipac-ng. Index: HOWTO-postgres-db =================================================================== RCS file: /cvsroot/ipac-ng/ipac-ng/doc/HOWTO-postgres-db,v retrieving revision 1.1 retrieving revision 1.2 diff -u -d -r1.1 -r1.2 --- HOWTO-postgres-db 11 Apr 2004 20:50:56 -0000 1.1 +++ HOWTO-postgres-db 8 May 2004 13:20:02 -0000 1.2 @@ -74,13 +74,6 @@ Enter it again: xxxxxxxx CREATE USER - as user postgres create a user "apache" which will later be used for - ipac-ng's webinterface - # createuser -U postgres -P -E -D -A apache - Enter password for user "apache": xxxxxxxx - Enter it again: xxxxxxxx - CREATE USER - 4) as user postgres create the table "ipac" which will later be used for ipac-ng # createdb -U postgres ipac |