You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
|
|
|
1
(14) |
|
2
(5) |
3
(15) |
4
(20) |
5
(2) |
6
(4) |
7
(16) |
8
(13) |
|
9
(3) |
10
(19) |
11
(13) |
12
(10) |
13
(16) |
14
|
15
|
|
16
|
17
(5) |
18
(14) |
19
(1) |
20
(12) |
21
(1) |
22
|
|
23
(1) |
24
(1) |
25
(1) |
26
(13) |
27
(2) |
28
(19) |
29
(15) |
|
30
(17) |
|
|
|
|
|
|
|
From: <sv...@va...> - 2013-06-17 18:59:59
|
florian 2013-06-17 19:59:51 +0100 (Mon, 17 Jun 2013)
New Revision: 2727
Log:
Add some more IRops to convert between binary floating point and
decimal floating point values. Needed to complete s390 DFP support.
Patch by Maran Pakkirisamy (ma...@li...).
Part of fixing BZ 307113.
Modified files:
trunk/priv/ir_defs.c
trunk/pub/libvex_ir.h
Modified: trunk/priv/ir_defs.c (+55 -7)
===================================================================
--- trunk/priv/ir_defs.c 2013-06-09 17:46:14 +01:00 (rev 2726)
+++ trunk/priv/ir_defs.c 2013-06-17 19:59:51 +01:00 (rev 2727)
@@ -983,11 +983,23 @@
case Iop_D128toI32U: vex_printf("D128toI32U"); return;
case Iop_D128toI64S: vex_printf("D128toI64S"); return;
case Iop_D128toI64U: vex_printf("D128toI64U"); return;
+ case Iop_F32toD32: vex_printf("F32toD32"); return;
+ case Iop_F32toD64: vex_printf("F32toD64"); return;
+ case Iop_F32toD128: vex_printf("F32toD128"); return;
+ case Iop_F64toD32: vex_printf("F64toD32"); return;
case Iop_F64toD64: vex_printf("F64toD64"); return;
+ case Iop_F64toD128: vex_printf("F64toD128"); return;
+ case Iop_F128toD32: vex_printf("F128toD32"); return;
+ case Iop_F128toD64: vex_printf("F128toD64"); return;
+ case Iop_F128toD128: vex_printf("F128toD128"); return;
+ case Iop_D32toF32: vex_printf("D32toF32"); return;
+ case Iop_D32toF64: vex_printf("D32toF64"); return;
+ case Iop_D32toF128: vex_printf("D32toF128"); return;
+ case Iop_D64toF32: vex_printf("D64toF32"); return;
case Iop_D64toF64: vex_printf("D64toF64"); return;
- case Iop_F64toD128: vex_printf("F64toD128"); return;
+ case Iop_D64toF128: vex_printf("D64toF128"); return;
+ case Iop_D128toF32: vex_printf("D128toF32"); return;
case Iop_D128toF64: vex_printf("D128toF64"); return;
- case Iop_F128toD128: vex_printf("F128toD128"); return;
case Iop_D128toF128: vex_printf("D128toF128"); return;
case Iop_AddD128: vex_printf("AddD128"); return;
case Iop_SubD128: vex_printf("SubD128"); return;
@@ -3015,21 +3027,57 @@
case Iop_I64UtoD64:
BINARY(ity_RMode, Ity_I64, Ity_D64);
+ case Iop_F32toD32:
+ BINARY(ity_RMode, Ity_F32, Ity_D32);
+
+ case Iop_F32toD64:
+ BINARY(ity_RMode, Ity_F32, Ity_D64);
+
+ case Iop_F32toD128:
+ BINARY(ity_RMode, Ity_F32, Ity_D128);
+
+ case Iop_F64toD32:
+ BINARY(ity_RMode, Ity_F64, Ity_D32);
+
case Iop_F64toD64:
BINARY(ity_RMode, Ity_F64, Ity_D64);
+ case Iop_F64toD128:
+ BINARY(ity_RMode, Ity_F64, Ity_D128);
+
+ case Iop_F128toD32:
+ BINARY(ity_RMode, Ity_F128, Ity_D32);
+
+ case Iop_F128toD64:
+ BINARY(ity_RMode, Ity_F128, Ity_D64);
+
+ case Iop_F128toD128:
+ BINARY(ity_RMode, Ity_F128, Ity_D128);
+
+ case Iop_D32toF32:
+ BINARY(ity_RMode, Ity_D32, Ity_F32);
+
+ case Iop_D32toF64:
+ BINARY(ity_RMode, Ity_D32, Ity_F64);
+
+ case Iop_D32toF128:
+ BINARY(ity_RMode, Ity_D32, Ity_F128);
+
+ case Iop_D64toF32:
+ BINARY(ity_RMode, Ity_D64, Ity_F32);
+
case Iop_D64toF64:
BINARY(ity_RMode, Ity_D64, Ity_F64);
- case Iop_F64toD128:
- BINARY(ity_RMode, Ity_F64, Ity_D128);
+ case Iop_D64toF128:
+ BINARY(ity_RMode, Ity_D64, Ity_F128);
+ case Iop_D128toF32:
+ BINARY(ity_RMode, Ity_D128, Ity_F32);
+
case Iop_D128toF64:
BINARY(ity_RMode, Ity_D128, Ity_F64);
- case Iop_F128toD128:
- BINARY(ity_RMode, Ity_F128, Ity_D128);
-
case Iop_D128toF128:
BINARY(ity_RMode, Ity_D128, Ity_F128);
Modified: trunk/pub/libvex_ir.h (+41 -5)
===================================================================
--- trunk/pub/libvex_ir.h 2013-06-09 17:46:14 +01:00 (rev 2726)
+++ trunk/pub/libvex_ir.h 2013-06-17 19:59:51 +01:00 (rev 2727)
@@ -1092,21 +1092,57 @@
/* IRRoundingMode(I32) x D128 -> I64 */
Iop_D128toI64U,
+ /* IRRoundingMode(I32) x F32 -> D32 */
+ Iop_F32toD32,
+
+ /* IRRoundingMode(I32) x F32 -> D64 */
+ Iop_F32toD64,
+
+ /* IRRoundingMode(I32) x F32 -> D128 */
+ Iop_F32toD128,
+
+ /* IRRoundingMode(I32) x F64 -> D32 */
+ Iop_F64toD32,
+
/* IRRoundingMode(I32) x F64 -> D64 */
Iop_F64toD64,
+ /* IRRoundingMode(I32) x F64 -> D128 */
+ Iop_F64toD128,
+
+ /* IRRoundingMode(I32) x F128 -> D32 */
+ Iop_F128toD32,
+
+ /* IRRoundingMode(I32) x F128 -> D64 */
+ Iop_F128toD64,
+
+ /* IRRoundingMode(I32) x F128 -> D128 */
+ Iop_F128toD128,
+
+ /* IRRoundingMode(I32) x D32 -> F32 */
+ Iop_D32toF32,
+
+ /* IRRoundingMode(I32) x D32 -> F64 */
+ Iop_D32toF64,
+
+ /* IRRoundingMode(I32) x D32 -> F128 */
+ Iop_D32toF128,
+
+ /* IRRoundingMode(I32) x D64 -> F32 */
+ Iop_D64toF32,
+
/* IRRoundingMode(I32) x D64 -> F64 */
Iop_D64toF64,
- /* IRRoundingMode(I32) x F64 -> D128 */
- Iop_F64toD128,
+ /* IRRoundingMode(I32) x D64 -> F128 */
+ Iop_D64toF128,
+ /* IRRoundingMode(I32) x D128 -> F32 */
+ Iop_D128toF32,
+
/* IRRoundingMode(I32) x D128 -> F64 */
Iop_D128toF64,
- /* IRRoundingMode(I32) x F128 -> D128 */
- Iop_F128toD128,
-
/* IRRoundingMode(I32) x D128 -> F128 */
Iop_D128toF128,
|
|
From: <sv...@va...> - 2013-06-17 18:44:27
|
florian 2013-06-17 19:44:15 +0100 (Mon, 17 Jun 2013)
New Revision: 13427
Log:
Update list of ignored files.
Modified directories:
trunk/none/tests/mips64/
Modified: trunk/none/tests/mips64/
Property changed: trunk/none/tests/mips64 (+0 -0)
___________________________________________________________________
Name: svn:ignore
+ Makefile
Makefile.in
.deps
|
|
From: Niall D. <ndo...@bl...> - 2013-06-13 19:07:20
|
> > Basically, why are there single quotes (') everywhere and what does
> > this mean?
>
> This is callgrind-specific.
>
> These symbol names are created if you ask Callgrind to provide information
at
> granularity of call chains. This should only happen if you use the option
"--
> separate-callers=xxx". The single quotes actually are kind of separators
and
> mean "called by". So the name up to the first single quote is the function
> executed, and the rest is the call chain.
Yup, that's exactly what he'd done. Thanks for the tip.
BTW Josef my callgrind to GraphML converter is progressing well. You can see
a representation of a BB10 Email Card call directed graph at
https://plus.google.com/109885711759115445224/posts/jnjHRuuBvz3. I'm hoping
to release that as open source too, assuming Legal approve.
Niall
|
|
From: Josef W. <Jos...@gm...> - 2013-06-13 17:44:16
|
Hi Niall,
Am 13.06.2013 16:46, schrieb Niall Douglas:
> Basically, why are there single quotes (') everywhere and what does this
> mean?
This is callgrind-specific.
These symbol names are created if you ask Callgrind to provide
information at granularity of call chains. This should only happen
if you use the option "--separate-callers=xxx". The single quotes
actually are kind of separators and mean "called by". So the name
up to the first single quote is the function executed, and the
rest is the call chain.
I used single quotes as the chances are low that a language uses
single quotes as part of regular function names. E.g. most other
punctuation characters can appear in C++ symbols.
However, KCachegrind does not yet parse the symbols as call
chains (since a long time, I plan to implement that for more
precise visualization).
> FYI this is occurring if valgrinding a forked process child. If we don't
> fork, it doesn't happen.
I have no idea what this has to do with forking.
Is this reproducible with a small test case? Can you write a bug report
for that.
Cheers,
Josef
>
> Thanks,
> Niall
>
> ---
> Opinions expressed here are my own and do not necessarily represent those of
> BlackBerry Inc.
>
>
>
>
> ------------------------------------------------------------------------------
> This SF.net email is sponsored by Windows:
>
> Build for Windows Store.
>
> http://p.sf.net/sfu/windows-dev2dev
>
>
>
> _______________________________________________
> Valgrind-developers mailing list
> Val...@li...
> https://lists.sourceforge.net/lists/listinfo/valgrind-developers
>
|
|
From: Niall D. <ndo...@bl...> - 2013-06-13 14:46:47
|
Dear valgrind devs,
Can anyone explain what's going on with this? Valgrind (Callgrind) is
outputting function locations like this:
set_clipboard_path'QString::~QString()'QVariant::cmp(QVariant const&)
const'QString::fromAscii_helper(char const*,
int)'QString::~QString()'QVariant::~QVariant()
__generic_strcpy'set_clipboard_path'QString::~QString()'QVariant::cmp(QVaria
nt const&) const'QString::fromAscii_helper(char const*,
int)'QString::~QString()
or worse:
0x00053c0f'0x00052d01'0x000538c1'QString::fromAscii_helper(char const*,
int)'QMapData::node_create(QMapData::Node**, int, int)'0x00054f39
0x00010075'0x0000fef9'0x0000ffaf'0x0000e0b7'0x0000e811'0x0000e681
Basically, why are there single quotes (') everywhere and what does this
mean?
FYI this is occurring if valgrinding a forked process child. If we don't
fork, it doesn't happen.
Thanks,
Niall
---
Opinions expressed here are my own and do not necessarily represent those of
BlackBerry Inc.
|
|
From: Philippe W. <phi...@sk...> - 2013-06-13 03:33:21
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.7.2 20121109 (Red Hat 4.7.2-8) GDB: GNU gdb (GDB) Fedora (7.5.1-37.fc18) Assembler: GNU assembler version 2.23.51.0.1-7.fc18 20120806 C library: GNU C Library stable release version 2.16 uname -mrs: Linux 3.7.2-204.fc18.ppc64 ppc64 Vendor version: Fedora release 18 (Spherical Cow) Nightly build on gcc110 ( Fedora release 18 (Spherical Cow), ppc64 ) Started at 2013-06-12 20:00:05 PDT Ended at 2013-06-12 20:32:58 PDT Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 557 tests, 31 stderr failures, 3 stdout failures, 0 stderrB failures, 0 stdoutB failures, 2 post failures == memcheck/tests/linux/getregset (stdout) memcheck/tests/linux/getregset (stderr) memcheck/tests/ppc64/power_ISA2_05 (stdout) memcheck/tests/supp_unknown (stderr) memcheck/tests/varinfo6 (stderr) memcheck/tests/wrap8 (stdout) memcheck/tests/wrap8 (stderr) massif/tests/big-alloc (post) massif/tests/deep-D (post) helgrind/tests/annotate_rwlock (stderr) helgrind/tests/free_is_write (stderr) helgrind/tests/hg02_deadlock (stderr) helgrind/tests/hg03_inherit (stderr) helgrind/tests/hg04_race (stderr) helgrind/tests/hg05_race2 (stderr) helgrind/tests/locked_vs_unlocked1_fwd (stderr) helgrind/tests/locked_vs_unlocked1_rev (stderr) helgrind/tests/locked_vs_unlocked2 (stderr) helgrind/tests/locked_vs_unlocked3 (stderr) helgrind/tests/pth_barrier1 (stderr) helgrind/tests/pth_barrier2 (stderr) helgrind/tests/pth_barrier3 (stderr) helgrind/tests/pth_destroy_cond (stderr) helgrind/tests/rwlock_race (stderr) helgrind/tests/tc01_simple_race (stderr) helgrind/tests/tc05_simple_race (stderr) helgrind/tests/tc06_two_races (stderr) helgrind/tests/tc06_two_races_xml (stderr) helgrind/tests/tc09_bad_unlock (stderr) helgrind/tests/tc14_laog_dinphils (stderr) helgrind/tests/tc16_byterace (stderr) 
helgrind/tests/tc18_semabuse (stderr) helgrind/tests/tc19_shadowmem (stderr) helgrind/tests/tc20_verifywrap (stderr) helgrind/tests/tc21_pthonce (stderr) helgrind/tests/tc22_exit_w_lock (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 03:18:57
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.3.0 20080428 (Red Hat 4.3.0-8) GDB: Assembler: GNU assembler version 2.18.50.0.6-2 20080403 C library: GNU C Library stable release version 2.8 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 9 (Sulphur) Nightly build on bristol ( x86_64, Fedora 9 ) Started at 2013-06-13 03:51:37 BST Ended at 2013-06-13 04:18:43 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 632 tests, 1 stderr failure, 1 stdout failure, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/amd64/insn-pcmpistri (stderr) none/tests/amd64/sse4-64 (stdout) |
|
From: Tom H. <to...@co...> - 2013-06-13 03:08:02
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.4.1 20090725 (Red Hat 4.4.1-2) GDB: Assembler: GNU assembler version 2.19.51.0.14-3.fc11 20090722 C library: GNU C Library stable release version 2.10.2 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 11 (Leonidas) Nightly build on bristol ( x86_64, Fedora 11 ) Started at 2013-06-13 03:41:24 BST Ended at 2013-06-13 04:07:44 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 634 tests, 1 stderr failure, 1 stdout failure, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/long_namespace_xml (stderr) none/tests/amd64/sse4-64 (stdout) |
|
From: Tom H. <to...@co...> - 2013-06-13 03:02:56
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.4.5 20101112 (Red Hat 4.4.5-2) GDB: Assembler: GNU assembler version 2.20.51.0.2-20.fc13 20091009 C library: GNU C Library stable release version 2.12.2 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 13 (Goddard) Nightly build on bristol ( x86_64, Fedora 13 ) Started at 2013-06-13 03:32:18 BST Ended at 2013-06-13 04:02:41 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 634 tests, 1 stderr failure, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == helgrind/tests/pth_barrier3 (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:53:37
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) GDB: GNU gdb (GDB) Fedora (7.2-52.fc14) Assembler: GNU assembler version 2.20.51.0.7-8.fc14 20100318 C library: GNU C Library stable release version 2.13 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 14 (Laughlin) Nightly build on bristol ( x86_64, Fedora 14 ) Started at 2013-06-13 03:23:13 BST Ended at 2013-06-13 03:53:24 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 653 tests, 1 stderr failure, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/origin5-bz2 (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:45:02
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.6.3 20120306 (Red Hat 4.6.3-2) GDB: GNU gdb (GDB) Fedora (7.3.1-48.fc15) Assembler: GNU assembler version 2.21.51.0.6-6.fc15 20110118 C library: GNU C Library stable release version 2.14.1 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 15 (Lovelock) Nightly build on bristol ( x86_64, Fedora 15 ) Started at 2013-06-13 03:13:12 BST Ended at 2013-06-13 03:44:50 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 1 stderr failure, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/origin5-bz2 (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:36:15
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.6.3 20120306 (Red Hat 4.6.3-2) GDB: GNU gdb (GDB) Fedora (7.3.50.20110722-16.fc16) Assembler: GNU assembler version 2.21.53.0.1-6.fc16 20110716 C library: GNU C Library development release version 2.14.90 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 16 (Verne) Nightly build on bristol ( x86_64, Fedora 16 ) Started at 2013-06-13 03:03:21 BST Ended at 2013-06-13 03:36:02 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 1 stderr failure, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/origin5-bz2 (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:26:38
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.7.2 20120921 (Red Hat 4.7.2-2) GDB: GNU gdb (GDB) Fedora (7.4.50.20120120-54.fc17) Assembler: GNU assembler version 2.22.52.0.1-10.fc17 20120131 C library: GNU C Library stable release version 2.15 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 17 (Beefy Miracle) Nightly build on bristol ( x86_64, Fedora 17 (Beefy Miracle) ) Started at 2013-06-13 02:51:45 BST Ended at 2013-06-13 03:26:24 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 5 stderr failures, 1 stdout failure, 0 stderrB failures, 0 stdoutB failures, 0 post failures == gdbserver_tests/mcinfcallRU (stderr) gdbserver_tests/mcinfcallWSRU (stderr) gdbserver_tests/mcmain_pic (stderr) memcheck/tests/origin5-bz2 (stderr) exp-sgcheck/tests/preen_invars (stdout) exp-sgcheck/tests/preen_invars (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:15:47
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.7.2 20121109 (Red Hat 4.7.2-8) GDB: GNU gdb (GDB) Fedora (7.5.1-38.fc18) Assembler: GNU assembler version 2.23.51.0.1-6.fc18 20120806 C library: GNU C Library stable release version 2.16 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 18 (Spherical Cow) Nightly build on bristol ( x86_64, Fedora 18 (Spherical Cow) ) Started at 2013-06-13 02:42:14 BST Ended at 2013-06-13 03:15:29 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 2 stderr failures, 1 stdout failure, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/origin5-bz2 (stderr) exp-sgcheck/tests/preen_invars (stdout) exp-sgcheck/tests/preen_invars (stderr) |
|
From: Christian B. <bor...@de...> - 2013-06-13 02:14:52
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (SUSE Linux) 4.3.4 [gcc-4_3-branch revision 152973] GDB: GNU gdb (GDB) SUSE (7.3-0.6.1) Assembler: GNU assembler (GNU Binutils; SUSE Linux Enterprise 11) 2.21.1 C library: GNU C Library stable release version 2.11.3 (20110527) uname -mrs: Linux 3.0.74-0.6.10-default s390x Vendor version: Welcome to SUSE Linux Enterprise Server 11 SP2 (s390x) - Kernel %r (%t). Nightly build on sless390 ( SUSE Linux Enterprise Server 11 SP1 gcc 4.3.4 on z196 (s390x) ) Started at 2013-06-13 03:45:01 CEST Ended at 2013-06-13 04:14:40 CEST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... done Regression test results follow == 635 tests, 0 stderr failures, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == |
|
From: Christian B. <bor...@de...> - 2013-06-13 02:13:23
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9bb4) GDB: GNU gdb (GDB) Fedora (7.5-1bb1.fc15) Assembler: GNU assembler version 2.21.51.0.6-6bb6.fc15 20110118 C library: GNU C Library stable release version 2.14.1 uname -mrs: Linux 3.8.6-60.x.20130412-s390xperformance s390x Vendor version: unknown Nightly build on fedora390 ( Fedora 15 with devel libc/toolchain on z196 (s390x) ) Started at 2013-06-13 03:45:01 CEST Ended at 2013-06-13 04:13:35 CEST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 636 tests, 2 stderr failures, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == helgrind/tests/tc18_semabuse (stderr) helgrind/tests/tc20_verifywrap (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 02:06:13
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1) GDB: GNU gdb (GDB) Fedora (7.6-30.fc19) Assembler: GNU assembler version 2.23.52.0.1-8.fc19 20130226 C library: GNU C Library (GNU libc) stable release version 2.17 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 19 (Schrödinger's Cat) Nightly build on bristol ( x86_64, Fedora 19 (Schrödinger's Cat) ) Started at 2013-06-13 02:32:39 BST Ended at 2013-06-13 03:05:58 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 3 stderr failures, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/dw4 (stderr) memcheck/tests/origin5-bz2 (stderr) exp-sgcheck/tests/hackedbz2 (stderr) |
|
From: Tom H. <to...@co...> - 2013-06-13 01:52:15
|
valgrind revision: 13426 VEX revision: 2726 C compiler: gcc (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1) GDB: GNU gdb (GDB) Fedora (7.6-32.fc20) Assembler: GNU assembler version 2.23.2 C library: GNU C Library (GNU libc) development release version 2.17.90 uname -mrs: Linux 3.9.4-200.fc18.x86_64 x86_64 Vendor version: Fedora release 20 (Rawhide) Nightly build on bristol ( x86_64, Fedora 20 ) Started at 2013-06-13 02:24:02 BST Ended at 2013-06-13 02:51:57 BST Results unchanged from 24 hours ago Checking out valgrind source tree ... done Configuring valgrind ... done Building valgrind ... done Running regression tests ... failed Regression test results follow == 655 tests, 3 stderr failures, 0 stdout failures, 0 stderrB failures, 0 stdoutB failures, 0 post failures == memcheck/tests/dw4 (stderr) memcheck/tests/origin5-bz2 (stderr) exp-sgcheck/tests/hackedbz2 (stderr) |
|
From: <sv...@va...> - 2013-06-12 21:45:59
|
philippe 2013-06-12 22:45:39 +0100 (Wed, 12 Jun 2013)
New Revision: 13426
Log:
improve --help for --main-stacksize and supported ARM cpu
If the command line option --main-stacksize is not used,
the current ulimit value is used, with a min of 1MB
and a max of 16MB. Document this min/max default formula
in the --help.
Also indicate that Valgrind supports ARMv7
Modified files:
trunk/coregrind/m_main.c
trunk/none/tests/cmdline1.stdout.exp
trunk/none/tests/cmdline2.stdout.exp
Modified: trunk/none/tests/cmdline1.stdout.exp (+1 -1)
===================================================================
--- trunk/none/tests/cmdline1.stdout.exp 2013-06-10 09:34:26 +01:00 (rev 13425)
+++ trunk/none/tests/cmdline1.stdout.exp 2013-06-12 22:45:39 +01:00 (rev 13426)
@@ -47,7 +47,7 @@
--max-stackframe=<number> assume stack switch for SP changes larger
than <number> bytes [2000000]
--main-stacksize=<number> set size of main thread's stack (in bytes)
- [use current 'ulimit' value]
+ [min(max(current 'ulimit' value,1MB),16MB)]
user options for Valgrind tools that replace malloc:
--alignment=<number> set minimum alignment of heap allocations [not used by this tool]
Modified: trunk/coregrind/m_main.c (+2 -1)
===================================================================
--- trunk/coregrind/m_main.c 2013-06-10 09:34:26 +01:00 (rev 13425)
+++ trunk/coregrind/m_main.c 2013-06-12 22:45:39 +01:00 (rev 13426)
@@ -159,7 +159,7 @@
" --max-stackframe=<number> assume stack switch for SP changes larger\n"
" than <number> bytes [2000000]\n"
" --main-stacksize=<number> set size of main thread's stack (in bytes)\n"
-" [use current 'ulimit' value]\n"
+" [min(max(current 'ulimit' value,1MB),16MB)]\n"
"\n"
" user options for Valgrind tools that replace malloc:\n"
" --alignment=<number> set minimum alignment of heap allocations [%s]\n"
@@ -1700,6 +1700,7 @@
VG_(printf)(" * x86 (practically any; Pentium-I or above), "
"AMD Athlon or above)\n");
VG_(printf)(" * AMD Athlon64/Opteron\n");
+ VG_(printf)(" * ARM (armv7)\n");
VG_(printf)(" * PowerPC (most; ppc405 and above)\n");
VG_(printf)(" * System z (64bit only - s390x; z900 and above)\n");
VG_(printf)("\n");
Modified: trunk/none/tests/cmdline2.stdout.exp (+1 -1)
===================================================================
--- trunk/none/tests/cmdline2.stdout.exp 2013-06-10 09:34:26 +01:00 (rev 13425)
+++ trunk/none/tests/cmdline2.stdout.exp 2013-06-12 22:45:39 +01:00 (rev 13426)
@@ -47,7 +47,7 @@
--max-stackframe=<number> assume stack switch for SP changes larger
than <number> bytes [2000000]
--main-stacksize=<number> set size of main thread's stack (in bytes)
- [use current 'ulimit' value]
+ [min(max(current 'ulimit' value,1MB),16MB)]
user options for Valgrind tools that replace malloc:
--alignment=<number> set minimum alignment of heap allocations [not used by this tool]
|
|
From: Philippe W. <phi...@sk...> - 2013-06-12 19:59:41
|
On Wed, 2013-06-12 at 11:08 +0200, Julian Seward wrote: > I would be happy to make stabs reading work in the new framework; it's > probably pretty easy. My concern mostly is to have a way to check I > didn't break anything. I suppose one option is to build something with > gcc-4.7 -gstabs and check that the old and new readers produce the same > debugging output. > > Better suggestions, and/or offers to test it properly, gratefully received. Build valgrind (or at least its regression tests) with -gstabs, make regtest and see if same nr of success before and after the change ? |
|
From: Ian C. <Ian...@ci...> - 2013-06-12 13:45:31
|
On Wed, 2013-06-12 at 14:42 +0100, Ian Campbell wrote: > The following small set of patches updates valgrind for the interface > changes made in Xen 4.3. > > With this and the two xl patches I just sent to the xen-devel list the > xl commands list, create (PV guest) and destroy commands are leak free. I've also attached these to https://bugs.kde.org/show_bug.cgi?id=321065 (I started doing this before I realised I'd have to add the patches 1-by-1 and SPAM the various bugzilla subscribers N times. Sorry about this -- next time I'll attach them as a tarball...) Ian. |
|
From: Ian C. <ian...@ci...> - 2013-06-12 13:43:06
|
From: Andrew Cooper <and...@ci...>
These hypercalls take no parameters and their return value is straight from the
ioctl() on privcmd. There are no memory read/writes.
Signed-off-by: Andrew Cooper <and...@ci...>
---
coregrind/m_syswrap/syswrap-xen.c | 9 +++++++++
1 files changed, 9 insertions(+), 0 deletions(-)
diff --git a/coregrind/m_syswrap/syswrap-xen.c b/coregrind/m_syswrap/syswrap-xen.c
index 8a0196f..ce300e8 100644
--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -152,6 +152,10 @@ PRE(memory_op)
break;
}
+ case VKI_XENMEM_get_sharing_freed_pages:
+ case VKI_XENMEM_get_sharing_shared_pages:
+ break;
+
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_memory_op", ARG1);
@@ -672,6 +676,11 @@ POST(memory_op)
sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
}
+
+ case VKI_XENMEM_get_sharing_freed_pages:
+ case VKI_XENMEM_get_sharing_shared_pages:
+ /* No outputs */
+ break;
}
}
--
1.7.2.5
|
|
From: Ian C. <ian...@ci...> - 2013-06-12 13:43:04
|
From: Andrew Cooper <and...@ci...>
Xen takes a pointer to a sysctl_sched_id struct, and writes a single uint32_t
into it. There are no memory reads, and a single memory write.
Signed-off-by: Andrew Cooper <and...@ci...>
---
coregrind/m_syswrap/syswrap-xen.c | 8 ++++++++
include/vki/vki-xen-sysctl.h | 7 ++++++-
2 files changed, 14 insertions(+), 1 deletions(-)
diff --git a/coregrind/m_syswrap/syswrap-xen.c b/coregrind/m_syswrap/syswrap-xen.c
index 61aa1e1..8a0196f 100644
--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -400,6 +400,10 @@ PRE(sysctl) {
}
break;
+ case VKI_XEN_SYSCTL_sched_id:
+ /* No inputs */
+ break;
+
case VKI_XEN_SYSCTL_cpupool_op:
PRE_XEN_SYSCTL_READ(cpupool_op, op);
@@ -791,6 +795,10 @@ POST(sysctl)
}
break;
+ case VKI_XEN_SYSCTL_sched_id:
+ POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
+ break;
+
case VKI_XEN_SYSCTL_cpupool_op:
if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
diff --git a/include/vki/vki-xen-sysctl.h b/include/vki/vki-xen-sysctl.h
index 32c8722..453752c 100644
--- a/include/vki/vki-xen-sysctl.h
+++ b/include/vki/vki-xen-sysctl.h
@@ -120,6 +120,11 @@ struct vki_xen_sysctl_physinfo_0000000a {
vki_uint32_t capabilities;
};
+struct vki_xen_sysctl_sched_id {
+ /* OUT variable. */
+ vki_uint32_t sched_id;
+};
+
struct vki_xen_sysctl {
vki_uint32_t cmd;
vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
@@ -130,7 +135,7 @@ struct vki_xen_sysctl {
struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
struct vki_xen_sysctl_topologyinfo topologyinfo;
struct vki_xen_sysctl_numainfo numainfo;
- //struct vki_xen_sysctl_sched_id sched_id;
+ struct vki_xen_sysctl_sched_id sched_id;
//struct vki_xen_sysctl_perfc_op perfc_op;
struct vki_xen_sysctl_getdomaininfolist_00000008 getdomaininfolist_00000008;
struct vki_xen_sysctl_getdomaininfolist_00000009 getdomaininfolist_00000009;
--
1.7.2.5
|
|
From: Ian C. <ian...@ci...> - 2013-06-12 13:43:03
|
New hypercalls:
- VKI_XENMEM_claim_pages
- VKI_XEN_DOMCTL_getnodeaffinity
- VKI_XEN_DOMCTL_setnodeaffinity
Plus placeholders for other new hypercalls which we don't yet support here.
New revision of the sysctl and domctl interfaces, due to the new
outstanding_pages field in physinfo and dominfo.
Xen changed the API (but not the ABI) of cpumasks to be a more generic bitmask.
Switch to using the latest names.
---
coregrind/m_syswrap/syswrap-xen.c | 92 +++++++++++++++++++++++++++++++------
include/vki/vki-xen-domctl.h | 42 ++++++++++++++++-
include/vki/vki-xen-memory.h | 1 +
include/vki/vki-xen-sysctl.h | 28 ++++++++++-
include/vki/vki-xen.h | 4 +-
5 files changed, 144 insertions(+), 23 deletions(-)
diff --git a/coregrind/m_syswrap/syswrap-xen.c b/coregrind/m_syswrap/syswrap-xen.c
index be884a1..61aa1e1 100644
--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -104,7 +104,8 @@ PRE(memory_op)
}
case VKI_XENMEM_increase_reservation:
case VKI_XENMEM_decrease_reservation:
- case VKI_XENMEM_populate_physmap: {
+ case VKI_XENMEM_populate_physmap:
+ case VKI_XENMEM_claim_pages: {
struct xen_memory_reservation *memory_reservation =
(struct xen_memory_reservation *)ARG2;
const HChar *which;
@@ -125,6 +126,9 @@ PRE(memory_op)
(Addr)memory_reservation->extent_start.p,
sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
+ case VKI_XENMEM_claim_pages:
+ which = "XENMEM_claim_pages";
+ break;
default:
which = "XENMEM_unknown";
break;
@@ -354,6 +358,7 @@ PRE(sysctl) {
{
case 0x00000008:
case 0x00000009:
+ case 0x0000000a:
break;
default:
VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
@@ -470,6 +475,7 @@ PRE(domctl)
{
case 0x00000007:
case 0x00000008:
+ case 0x00000009:
break;
default:
VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
@@ -567,7 +573,17 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
(Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
- domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+ domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+ break;
+
+ case VKI_XEN_DOMCTL_getnodeaffinity:
+ __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+ break;
+ case VKI_XEN_DOMCTL_setnodeaffinity:
+ __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+ PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
+ (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+ domctl->u.nodeaffinity.nodemap.nr_bits / 8);
break;
case VKI_XEN_DOMCTL_getvcpucontext:
@@ -640,6 +656,7 @@ POST(memory_op)
switch (ARG1) {
case VKI_XENMEM_set_memory_map:
case VKI_XENMEM_decrease_reservation:
+ case VKI_XENMEM_claim_pages:
/* No outputs */
break;
case VKI_XENMEM_increase_reservation:
@@ -743,6 +760,7 @@ POST(sysctl)
{
case 0x00000008:
case 0x00000009:
+ case 0x0000000a:
break;
default:
return;
@@ -787,18 +805,39 @@ POST(sysctl)
break;
case VKI_XEN_SYSCTL_physinfo:
- POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
- POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
- POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
- POST_XEN_SYSCTL_WRITE(physinfo, max_cpu_id);
- POST_XEN_SYSCTL_WRITE(physinfo, nr_nodes);
- POST_XEN_SYSCTL_WRITE(physinfo, max_node_id);
- POST_XEN_SYSCTL_WRITE(physinfo, cpu_khz);
- POST_XEN_SYSCTL_WRITE(physinfo, total_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, free_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, scrub_pages);
- POST_XEN_SYSCTL_WRITE(physinfo, hw_cap[8]);
- POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ case 0x00000009: /* Unchanged from version 8 */
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
+ break;
+ case 0x0000000a:
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
+ POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
+ break;
+ }
break;
case VKI_XEN_SYSCTL_topologyinfo:
@@ -834,6 +873,7 @@ POST(domctl){
switch (domctl->interface_version) {
case 0x00000007:
case 0x00000008:
+ case 0x00000009:
break;
default:
return;
@@ -855,6 +895,7 @@ POST(domctl){
case VKI_XEN_DOMCTL_hypercall_init:
case VKI_XEN_DOMCTL_setvcpuaffinity:
case VKI_XEN_DOMCTL_setvcpucontext:
+ case VKI_XEN_DOMCTL_setnodeaffinity:
case VKI_XEN_DOMCTL_set_cpuid:
case VKI_XEN_DOMCTL_unpausedomain:
/* No output fields */
@@ -908,7 +949,12 @@ POST(domctl){
case VKI_XEN_DOMCTL_getvcpuaffinity:
POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
- domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+ domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+ break;
+
+ case VKI_XEN_DOMCTL_getnodeaffinity:
+ POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+ domctl->u.nodeaffinity.nodemap.nr_bits / 8);
break;
case VKI_XEN_DOMCTL_getdomaininfo:
@@ -942,6 +988,22 @@ POST(domctl){
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
break;
+ case 0x00000009:
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
+ break;
}
break;
case VKI_XEN_DOMCTL_getvcpucontext:
diff --git a/include/vki/vki-xen-domctl.h b/include/vki/vki-xen-domctl.h
index 241c008..815e0a7 100644
--- a/include/vki/vki-xen-domctl.h
+++ b/include/vki/vki-xen-domctl.h
@@ -7,6 +7,7 @@
*
* - 00000007: Xen 4.1
* - 00000008: Xen 4.2
+ * - 00000009: Xen 4.3
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -57,7 +58,7 @@
#define VKI_XEN_DOMCTL_pin_mem_cacheattr 41
#define VKI_XEN_DOMCTL_set_ext_vcpucontext 42
#define VKI_XEN_DOMCTL_get_ext_vcpucontext 43
-#define VKI_XEN_DOMCTL_set_opt_feature 44
+#define VKI_XEN_DOMCTL_set_opt_feature 44 /*Obsolete IA64 only */
#define VKI_XEN_DOMCTL_test_assign_device 45
#define VKI_XEN_DOMCTL_set_target 46
#define VKI_XEN_DOMCTL_deassign_device 47
@@ -80,6 +81,9 @@
#define VKI_XEN_DOMCTL_set_access_required 64
#define VKI_XEN_DOMCTL_audit_p2m 65
#define VKI_XEN_DOMCTL_set_virq_handler 66
+#define VKI_XEN_DOMCTL_set_broken_page_p2m 67
+#define VKI_XEN_DOMCTL_setnodeaffinity 68
+#define VKI_XEN_DOMCTL_getnodeaffinity 69
#define VKI_XEN_DOMCTL_gdbsx_guestmemio 1000
#define VKI_XEN_DOMCTL_gdbsx_pausevcpu 1001
#define VKI_XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -130,9 +134,39 @@ struct vki_xen_domctl_getdomaininfo_00000008 {
typedef struct vki_xen_domctl_getdomaininfo_00000008 vki_xen_domctl_getdomaininfo_00000008_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000008_t);
+struct vki_xen_domctl_getdomaininfo_00000009 {
+ /* OUT variables. */
+ vki_xen_domid_t domain;
+ vki_uint32_t flags;
+ vki_xen_uint64_aligned_t tot_pages;
+ vki_xen_uint64_aligned_t max_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_xen_uint64_aligned_t shr_pages;
+ vki_xen_uint64_aligned_t paged_pages;
+ vki_xen_uint64_aligned_t shared_info_frame;
+ vki_xen_uint64_aligned_t cpu_time;
+ vki_uint32_t nr_online_vcpus;
+ vki_uint32_t max_vcpu_id;
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t cpupool;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000009 vki_xen_domctl_getdomaininfo_00000009_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
+
+/* Get/set the NUMA node(s) with which the guest has affinity with. */
+/* XEN_DOMCTL_setnodeaffinity */
+/* XEN_DOMCTL_getnodeaffinity */
+struct vki_xen_domctl_nodeaffinity {
+ struct vki_xenctl_bitmap nodemap;/* IN */
+};
+typedef struct vki_xen_domctl_nodeaffinity vki_xen_domctl_nodeaffinity_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_nodeaffinity_t);
+
+
struct vki_xen_domctl_vcpuaffinity {
vki_uint32_t vcpu; /* IN */
- struct vki_xenctl_cpumap cpumap; /* IN/OUT */
+ struct vki_xenctl_bitmap cpumap; /* IN/OUT */
};
struct vki_xen_domctl_max_mem {
@@ -233,10 +267,12 @@ struct vki_xen_domctl {
struct vki_xen_domctl_createdomain createdomain;
struct vki_xen_domctl_getdomaininfo_00000007 getdomaininfo_00000007;
struct vki_xen_domctl_getdomaininfo_00000008 getdomaininfo_00000008;
+ struct vki_xen_domctl_getdomaininfo_00000009 getdomaininfo_00000009;
//struct vki_xen_domctl_getmemlist getmemlist;
//struct vki_xen_domctl_getpageframeinfo getpageframeinfo;
//struct vki_xen_domctl_getpageframeinfo2 getpageframeinfo2;
//struct vki_xen_domctl_getpageframeinfo3 getpageframeinfo3;
+ struct vki_xen_domctl_nodeaffinity nodeaffinity;
struct vki_xen_domctl_vcpuaffinity vcpuaffinity;
//struct vki_xen_domctl_shadow_op shadow_op;
struct vki_xen_domctl_max_mem max_mem;
@@ -266,7 +302,6 @@ struct vki_xen_domctl {
//struct vki_xen_domctl_ioport_mapping ioport_mapping;
//struct vki_xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
//struct vki_xen_domctl_ext_vcpucontext ext_vcpucontext;
- //struct vki_xen_domctl_set_opt_feature set_opt_feature;
//struct vki_xen_domctl_set_target set_target;
//struct vki_xen_domctl_subscribe subscribe;
//struct vki_xen_domctl_debug_op debug_op;
@@ -280,6 +315,7 @@ struct vki_xen_domctl {
//struct vki_xen_domctl_audit_p2m audit_p2m;
//struct vki_xen_domctl_set_virq_handler set_virq_handler;
//struct vki_xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ //struct vki_xen_domctl_set_broken_page_p2m set_broken_page_p2m;
//struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
//struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
vki_uint8_t pad[128];
diff --git a/include/vki/vki-xen-memory.h b/include/vki/vki-xen-memory.h
index 7de8d33..eac7871 100644
--- a/include/vki/vki-xen-memory.h
+++ b/include/vki/vki-xen-memory.h
@@ -20,6 +20,7 @@
#define VKI_XENMEM_get_pod_target 17
#define VKI_XENMEM_get_sharing_freed_pages 18
#define VKI_XENMEM_get_sharing_shared_pages 19
+#define VKI_XENMEM_claim_pages 24
struct vki_xen_memory_map {
unsigned int nr_entries;
diff --git a/include/vki/vki-xen-sysctl.h b/include/vki/vki-xen-sysctl.h
index c5178d7..32c8722 100644
--- a/include/vki/vki-xen-sysctl.h
+++ b/include/vki/vki-xen-sysctl.h
@@ -7,6 +7,7 @@
*
* - 00000008: Xen 4.1
* - 00000009: Xen 4.2
+ * - 0000000a: Xen 4.3
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -35,6 +36,7 @@
#define VKI_XEN_SYSCTL_numainfo 17
#define VKI_XEN_SYSCTL_cpupool_op 18
#define VKI_XEN_SYSCTL_scheduler_op 19
+#define VKI_XEN_SYSCTL_coverage_op 20
struct vki_xen_sysctl_getdomaininfolist_00000008 {
/* IN variables. */
@@ -69,7 +71,7 @@ struct vki_xen_sysctl_cpupool_op {
vki_uint32_t domid; /* IN: M */
vki_uint32_t cpu; /* IN: AR */
vki_uint32_t n_dom; /* OUT: I */
- struct vki_xenctl_cpumap cpumap; /* OUT: IF */
+ struct vki_xenctl_bitmap cpumap; /* OUT: IF */
};
struct vki_xen_sysctl_topologyinfo {
@@ -85,7 +87,7 @@ struct vki_xen_sysctl_numainfo {
VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memfree;
VKI_XEN_GUEST_HANDLE_64(vki_uint32) node_to_node_distance;
};
-struct vki_xen_sysctl_physinfo {
+struct vki_xen_sysctl_physinfo_00000008 {
vki_uint32_t threads_per_core;
vki_uint32_t cores_per_socket;
vki_uint32_t nr_cpus; /* # CPUs currently online */
@@ -101,13 +103,31 @@ struct vki_xen_sysctl_physinfo {
vki_uint32_t capabilities;
};
+struct vki_xen_sysctl_physinfo_0000000a {
+ vki_uint32_t threads_per_core;
+ vki_uint32_t cores_per_socket;
+ vki_uint32_t nr_cpus; /* # CPUs currently online */
+ vki_uint32_t max_cpu_id; /* Largest possible CPU ID on this host */
+ vki_uint32_t nr_nodes; /* # nodes currently online */
+ vki_uint32_t max_node_id; /* Largest possible node ID on this host */
+ vki_uint32_t cpu_khz;
+ vki_xen_uint64_aligned_t total_pages;
+ vki_xen_uint64_aligned_t free_pages;
+ vki_xen_uint64_aligned_t scrub_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_uint32_t hw_cap[8];
+
+ vki_uint32_t capabilities;
+};
+
struct vki_xen_sysctl {
vki_uint32_t cmd;
vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
//struct vki_xen_sysctl_readconsole readconsole;
//struct vki_xen_sysctl_tbuf_op tbuf_op;
- struct vki_xen_sysctl_physinfo physinfo;
+ struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
+ struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
struct vki_xen_sysctl_topologyinfo topologyinfo;
struct vki_xen_sysctl_numainfo numainfo;
//struct vki_xen_sysctl_sched_id sched_id;
@@ -124,6 +144,8 @@ struct vki_xen_sysctl {
//struct vki_xen_sysctl_lockprof_op lockprof_op;
struct vki_xen_sysctl_cpupool_op cpupool_op;
//struct vki_xen_sysctl_scheduler_op scheduler_op;
+ //struct vki_xen_sysctl_coverage_op coverage_op;
+
vki_uint8_t pad[128];
} u;
};
diff --git a/include/vki/vki-xen.h b/include/vki/vki-xen.h
index ed3cc1b..87fbb4f 100644
--- a/include/vki/vki-xen.h
+++ b/include/vki/vki-xen.h
@@ -71,9 +71,9 @@ __DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint16, vki_uint16_t);
__DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint32, vki_uint32_t);
__DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint64, vki_uint64_t);
-struct vki_xenctl_cpumap {
+struct vki_xenctl_bitmap {
VKI_XEN_GUEST_HANDLE_64(vki_uint8) bitmap;
- vki_uint32_t nr_cpus;
+ vki_uint32_t nr_bits;
};
#include <vki/vki-xen-domctl.h>
--
1.7.2.5
|
|
From: Ian C. <ian...@ci...> - 2013-06-12 13:43:01
|
---
coregrind/m_syswrap/syswrap-linux.c | 22 +++++++++++-----------
1 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index a42a572..039f8d4 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -6508,37 +6508,37 @@ PRE(sys_ioctl)
case VKI_XEN_IOCTL_PRIVCMD_MMAP: {
struct vki_xen_privcmd_mmap *args =
(struct vki_xen_privcmd_mmap *)(ARG3);
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP(num)",
(Addr)&args->num, sizeof(args->num));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP(dom)",
(Addr)&args->dom, sizeof(args->dom));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP(entry)",
(Addr)args->entry, sizeof(*(args->entry)) * args->num);
break;
}
case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH: {
struct vki_xen_privcmd_mmapbatch *args =
(struct vki_xen_privcmd_mmapbatch *)(ARG3);
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH(num)",
(Addr)&args->num, sizeof(args->num));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH(dom)",
(Addr)&args->dom, sizeof(args->dom));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH(addr)",
(Addr)&args->addr, sizeof(args->addr));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH(arr)",
(Addr)args->arr, sizeof(*(args->arr)) * args->num);
break;
}
case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2: {
struct vki_xen_privcmd_mmapbatch_v2 *args =
(struct vki_xen_privcmd_mmapbatch_v2 *)(ARG3);
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2(num)",
(Addr)&args->num, sizeof(args->num));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2(dom)",
(Addr)&args->dom, sizeof(args->dom));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2(addr)",
(Addr)&args->addr, sizeof(args->addr));
- PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2",
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2(arr)",
(Addr)args->arr, sizeof(*(args->arr)) * args->num);
break;
}
--
1.7.2.5
|