|
From: Jiri J. <jja...@re...> - 2013-10-07 11:42:13
|
Signed-off-by: Jiri Jaburek <jja...@re...>
---
audit-test/netfilter/do_netfilsvr.bash | 6 ------
audit-test/netfilter/run.conf | 6 ------
2 files changed, 12 deletions(-)
delete mode 100755 audit-test/netfilter/do_netfilsvr.bash
diff --git a/audit-test/netfilter/do_netfilsvr.bash b/audit-test/netfilter/do_netfilsvr.bash
deleted file mode 100755
index 7bb4296..0000000
--- a/audit-test/netfilter/do_netfilsvr.bash
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# Start up a local lblnet_tst_server
-
-exec ../utils/network-server/lblnet_tst_server -p 4000 &
-exit
diff --git a/audit-test/netfilter/run.conf b/audit-test/netfilter/run.conf
index af28b90..c1eb65c 100644
--- a/audit-test/netfilter/run.conf
+++ b/audit-test/netfilter/run.conf
@@ -394,12 +394,6 @@ function setup_default {
if [[ $tspid ]]; then
kill -9 $tspid
fi
-
- if [[ ! $xndpid ]]; then
- echo "starting local lblnet_tst_server"
- ./do_netfilsvr.bash
- sleep 1
- fi
fi
# generate the host command string
--
1.8.3.1
|
|
From: Jiri J. <jja...@re...> - 2013-10-07 11:48:16
|
The virsh command blocks until the requested operation has completed,
so there is no need to sleep afterwards.
Also check the guest more often (when waiting for an installed system).
Signed-off-by: Jiri Jaburek <jja...@re...>
---
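A minimal sketch of the new wait pattern (the wait_for_install wrapper
name is hypothetical; the loop body mirrors the startup_hook hunks in
audit-test/kvm/run.conf below): the total timeout stays the same, but
the guest is polled six times as often.

    wait_for_install() {
        local timeout=$1  # timeout in minutes, as in the original loop
        # run the loop 6x as many times while sleeping 10s instead of
        # 60s, so the total timeout is unchanged but we notice sooner
        for i in $(seq 1 $((timeout*6))); do
            # zero qemu-kvm processes left means the guest finished
            # its install and shut itself down
            local pids_count=$(ps -C qemu-kvm -o pid= | wc -l)
            [[ $pids_count -eq 0 ]] && return 0
            sleep 10
        done
        return 1  # guest still running after the timeout
    }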
audit-test/kvm-cgroups/functions_cgroup_device.bash | 2 +-
audit-test/kvm-iommu/test_pci_passthrough.bash | 9 +--------
audit-test/kvm/run.conf | 16 ++++++----------
3 files changed, 8 insertions(+), 19 deletions(-)
diff --git a/audit-test/kvm-cgroups/functions_cgroup_device.bash b/audit-test/kvm-cgroups/functions_cgroup_device.bash
index e94e1b6..5536805 100755
--- a/audit-test/kvm-cgroups/functions_cgroup_device.bash
+++ b/audit-test/kvm-cgroups/functions_cgroup_device.bash
@@ -77,7 +77,7 @@ create_guest_domain() {
append_cleanup "/bin/rm -f ${1}.xml"
prepend_cleanup "/usr/bin/virsh destroy $1"
- /usr/bin/virsh create ${1}.xml && sleep 3
+ /usr/bin/virsh create ${1}.xml
return $?
}
diff --git a/audit-test/kvm-iommu/test_pci_passthrough.bash b/audit-test/kvm-iommu/test_pci_passthrough.bash
index 91244b4..33715d1 100755
--- a/audit-test/kvm-iommu/test_pci_passthrough.bash
+++ b/audit-test/kvm-iommu/test_pci_passthrough.bash
@@ -91,7 +91,6 @@ reload_kvm_module_for_unsafe_interrupts() {
/sbin/modprobe -r kvm
/sbin/modprobe kvm allow_unsafe_assigned_interrupts=1
/sbin/modprobe kvm_intel
- sleep 2
}
set_selinux_booleans() {
@@ -160,14 +159,13 @@ create_guest_domain() {
prepend_cleanup "/usr/bin/virsh destroy $1"
append_cleanup "/usr/bin/virsh nodedev-reattach $pci_device_name"
- /usr/bin/virsh create ${1}.xml && sleep 10
+ /usr/bin/virsh create ${1}.xml
return $?
}
destroy_guest_domain() {
local rc=0
/usr/bin/virsh destroy $1 || ((rc+=1))
- sleep 1
/usr/bin/virsh nodedev-reattach $pci_device_name || ((rc+=2))
return $rc
}
@@ -310,11 +308,9 @@ attach_pci_device() {
case $1 in
1) # Good case - attached correctly
/usr/bin/virsh attach-device $2 pci_dev.xml || ((rc+=1))
- sleep 3
;;
2|3) # Bad case - double attach
/usr/bin/virsh attach-device $2 pci_dev.xml && ((rc+=1))
- sleep 3
;;
*) exit_error "Unknown attach scenario"
;;
@@ -340,7 +336,6 @@ detach_pci_device() {
case $1 in
1) # Good case - detached correctly
/usr/bin/virsh detach-device $2 pci_dev.xml || ((rc+=1))
- sleep 3
check_device_driver $pci_driver || ((rc+=2))
# Look for NOT mapped PCI dev mem regions
pid_nomaps="`get_guest_domain_pid $2`"
@@ -348,7 +343,6 @@ detach_pci_device() {
;;
2) # Bad case - double detach
/usr/bin/virsh detach-device $2 pci_dev.xml && ((rc+=1))
- sleep 3
check_device_driver $pci_driver || ((rc+=2))
# Look for NOT mapped PCI dev mem regions
pid_nomaps="`get_guest_domain_pid $2`"
@@ -356,7 +350,6 @@ detach_pci_device() {
;;
3) # Bad case - already in use by other VM
/usr/bin/virsh detach-device $2 pci_dev.xml && ((rc+=1))
- sleep 3
check_device_driver "pci-stub" || ((rc+=2))
# Look for mapped PCI dev mem regions
pid_maps="`get_guest_domain_pid $3`"
diff --git a/audit-test/kvm/run.conf b/audit-test/kvm/run.conf
index 7371d72..0f39b0f 100644
--- a/audit-test/kvm/run.conf
+++ b/audit-test/kvm/run.conf
@@ -113,7 +113,6 @@ function run_test {
for i in $(seq $first $last); do
virsh destroy KVM-Guest-$i
- sleep 4
done
# Search for audit records generated for the processes representing
@@ -131,7 +130,6 @@ function run_test {
for i in $(seq $first $last); do
virsh start KVM-Guest-$i
- sleep 4
done
# Search for audit records generated for the processes representing
@@ -524,7 +522,7 @@ function startup_hook {
break
fi
- sleep 60
+ sleep 10
done
else
# Wait the specified timeout (total
@@ -532,14 +530,14 @@ function startup_hook {
# machine environment to complete
# its install.
- for i in $(seq 1 $timeout); do
+ for i in $(seq 1 $((timeout*6))); do
pids_count=$(ps -C qemu-kvm -o pid= | wc -l)
if [[ $pids_count -eq 0 ]]; then
break
fi
- sleep 60
+ sleep 10
done
fi
done
@@ -608,7 +606,7 @@ function startup_hook {
break
fi
- sleep 60
+ sleep 10
done
else
# Wait the specified timeout (total
@@ -616,14 +614,14 @@ function startup_hook {
# machine environment to complete
# its install.
- for i in $(seq 1 $timeout); do
+ for i in $(seq 1 $((timeout*6))); do
pids_count=$(ps -C qemu-kvm -o pid= | wc -l)
if [[ $pids_count -eq 0 ]]; then
break
fi
- sleep 60
+ sleep 10
done
fi
done
@@ -636,7 +634,6 @@ function startup_hook {
for i in $(seq $first $last); do
virsh start KVM-Guest-$i &> /dev/null
- sleep 4
done
# Export the filter key to use in audit rules
@@ -658,7 +655,6 @@ function cleanup_hook {
# destroy all virtual machines after testing
for i in $(seq $first $last); do
virsh destroy KVM-Guest-$i &> /dev/null
- sleep 4
done
# in FIPS mode restore gcrypt RNG source to /dev/random
--
1.8.3.1
|
|
From: Linda K. <lin...@hp...> - 2013-10-08 21:41:08
|
Wow, this is really impressive. Thanks for sorting out the quirks
and bugs in the test suite that the sleeps mostly worked around.
I recall some of these sleeps being added by the IBMers as they were
testing with ppc and s390. Have the tests been run on those
architectures?

In any case, I think Miroslav should go ahead and push these patches.
This is a huge improvement.

Thanks again,

-- ljk

On 10/07/13 07:27, Jiri Jaburek wrote:
> Hi,
> yet another batch of changes from our team is here.
>
> This time, it's mostly about making the suite faster, with related
> changes all around the idea. Aside from those, a new "make rerun"
> feature is included, which re-runs only non-PASSed tests, along with
> a few generic fix-ups.
>
> There are several important things I'd like to point out.
> First, the discussed issue of "making lblnet_tst_server inetd-only"
> no longer exists, I managed to create a solution which retains the
> original standalone daemon functionality, second, the
> "TCP RST related tests" change is included, and third, there's one
> more similar change in this patchset I would like to point out
> explicitly - patch 15. I'd really like some comments on that one.
>
> All changes are RHEL-6.2 compatible, I've tested both base and mls
> runs of the suite without fails or errors.
>
> Since this patch series is mainly about suite speedups, I should
> probably provide some benchmarks. When trying to generate those,
> I encountered an issue with tcp_syn_retries behaving differently
> on RHEL6.2, RHEL6.3+ and RHEL7 (described in patch 04), so I had
> to do three separate runs.
> The following is a "time make run" of the three networking-related
> buckets combined (network, netfilter, netfilebt):
>
> RHEL6.2, default tcp_syn_retries, upstream suite = 101 minutes
> RHEL6.2, default tcp_syn_retries, patched suite = 43 minutes
> RHEL6.2, custom tcp_syn_retries, patched suite = 37 minutes
>
> RHEL6.3+, default tcp_syn_retries, upstream suite = 118 minutes
> RHEL6.3+, default tcp_syn_retries, patched suite = 69 minutes
> RHEL6.3+, custom tcp_syn_retries, patched suite = 37 minutes
>
> and based on known default value of tcp_syn_retries on RHEL7, we can
> simulate a RHEL7 full-pass run on RHEL6.3+:
>
> (RHEL7), default tcp_syn_retries, upstream suite = 136 minutes
> (RHEL7), default tcp_syn_retries, patched suite = 90 minutes
> (RHEL7), custom tcp_syn_retries, patched suite = 37 minutes
>
> IOW, this patchset speeds up things on RHEL6.2 by 64 minutes,
> on RHEL6.3+ by 81 minutes and on RHEL7 by 99 minutes (or 1h39m),
> counting only the networking-related buckets, on our hardware.
> More speed improvements come from patches 07 and 22.
>
> Please see commit messages of respective patches for more information,
> the patches are attached via In-Reply-To/References to this mail.
>
> Thanks for the review,
> Jiri
|
From: Miroslav V. <mva...@re...> - 2013-10-10 10:11:50
|
Thanks Linda, we are glad you like the improvements. Patches are now
upstream.

Best regards,
/M

----- Original Message -----
> Wow, this is really impressive. Thanks for sorting out the quirks
> and bugs in the test suite that the sleeps mostly worked around.
> I recall some of these sleeps being added by the IBMers as they were
> testing with ppc and s390. Have the tests been run on those
> architectures?
>
> In any case, I think Miroslav should go ahead and push these patches.
> This is a huge improvement.
>
> Thanks again,
>
> -- ljk
>
> On 10/07/13 07:27, Jiri Jaburek wrote:
> > Hi,
> > yet another batch of changes from our team is here.
> > [...]

--
Miroslav Vadkerti :: Quality Assurance Engineer / RHCE :: BaseOS QE - Security
Phone +420 532 294 129 :: CR cell +420 775 039 842 :: SR cell +421 904 135 440
IRC mvadkert at #qe #urt #brno #rpmdiff :: GnuPG ID 0x25881087 at pgp.mit.edu
Red Hat s.r.o, Purkyňova 99/71, 612 45, Brno, Czech Republic
|
From: Jiri J. <jja...@re...> - 2013-10-10 16:32:13
|
On 10/08/2013 11:40 PM, Linda Knippers wrote:
> Wow, this is really impressive. Thanks for sorting out the quirks
> and bugs in the test suite that the sleeps mostly worked around.
> I recall some of these sleeps being added by the IBMers as they were
> testing with ppc and s390. Have the tests been run on those
> architectures?

Yes for ppc64, and I can at least confirm that the netfilter bucket
passes without problems. The netfilebt bucket isn't executed on ppc64
according to the Makefile.

> In any case, I think Miroslav should go ahead and push these patches.
> This is a huge improvement.
>
> Thanks again,
>
> -- ljk
|
From: Linda K. <lin...@hp...> - 2013-10-10 16:39:09
|
Jiri Jaburek wrote:
> On 10/08/2013 11:40 PM, Linda Knippers wrote:
>> Wow, this is really impressive. Thanks for sorting out the quirks
>> and bugs in the test suite that the sleeps mostly worked around.
>> I recall some of these sleeps being added by the IBMers as they were
>> testing with ppc and s390. Have the tests been run on those
>> architectures?
>
> Yes for ppc64, and I can at least confirm that the netfilter bucket
> passes without problems. The netfilebt bucket isn't executed on ppc64
> according to the Makefile.

Oh right, no KVM support on ppc64 either, which is funny because those
are the tests that caused me to worry. Not sure what the ppc64/kvm
plans are for RHEL7.

Thanks again, this is good stuff.

-- ljk

>> In any case, I think Miroslav should go ahead and push these patches.
>> This is a huge improvement.
>>
>> Thanks again,
>>
>> -- ljk