Menu

performance problem

Help
mauro
2009-10-02
2012-09-28
  • mauro

    mauro - 2009-10-02

    hello,
    if I retrieve e.g. 1000 rows of a table with about 15 columns with plain JDBC
    it takes
    90 ms.
    if I do it with HA-JDBC in between, it takes:
    500 ms.

    This is a factor of 5 to 6 times slower; can anybody tell me what is going
    wrong?

    I'm only using one DB instance!

    my HA-JDBC cfg

    <ha-jdbc>
    <distributable config="jgroups.xml"
    stack="tcp_example"/>
    <sync id="DifferentialSynchronization"
    class="net.sf.hajdbc.sync.DifferentialSynchronizationStrategy"/>
    <cluster balancer="load" default-
    sync="DifferentialSynchronization" dialect="mysql" meta-
    data-cache="eager" transaction-mode="parallel" failure-
    detect-schedule="0 * ? * " auto-activate-schedule="0 * ?
    *">
    <database id="mauroDB" weight="1">
    <driver>com.mysql.jdbc.Driver</driver>
    <url>jdbc:mysql://localhost:3306/tmed</url>
    <user>tmed</user>
    <password>tmed</password>
    </database>
    </cluster>
    </ha-jdbc>

    my jgroups cfg:

    <protocol_stacks>

    <stack name="tcp_example"
    description="TCP based stack, with flow control and message bundling.
    This is usually used when IP
    multicasting cannot be used in a network, e.g. because it is disabled (routers
    discard multicast).
    Note that TCP.bind_addr and TCPPING.initial_hosts should be set, possibly via
    system properties, e.g.
    -Djgroups.bind_addr=192.168.5.2 and -Djgroups.tcpping.initial_hosts=192.168.5.2">
    <config>
    <!-- Only change the port unless you really know what you are doing!-->
    <TCP start_port="6500"
    loopback="true"
    recv_buf_size="20000000"
    send_buf_size="640000"
    discard_incompatible_packets="true"
    max_bundle_size="64000"
    max_bundle_timeout="1"
    use_incoming_packet_handler="true"
    enable_bundling="false"
    use_send_queues="false"
    sock_conn_timeout="300"
    skip_suspected_members="true"

    use_concurrent_stack="true"

    thread_pool.enabled="true"
    thread_pool.min_threads="1"
    thread_pool.max_threads="25"
    thread_pool.keep_alive_time="5000"
    thread_pool.queue_enabled="false"
    thread_pool.queue_max_size="100"
    thread_pool.rejection_policy="Run"

    oob_thread_pool.enabled="true"
    oob_thread_pool.min_threads="1"
    oob_thread_pool.max_threads="8"
    oob_thread_pool.keep_alive_time="5000"
    oob_thread_pool.queue_enabled="false"
    oob_thread_pool.queue_max_size="100"
    oob_thread_pool.rejection_policy="Run"/>

    <!-- The 'initial_hosts' attribute will define which other hosts will join
    this JGroup channel -->
    <TCPPING timeout="500"
    initial_hosts="${jgroups.tcpping.initial_hosts:localhost}"
    port_range="1"
    num_initial_members="1"/>

    <!--MPING timeout="300"
    receive_on_all_interfaces="true"
    mcast_addr="228.8.8.8"
    mcast_port="7555"
    ip_ttl="8"
    num_initial_members="1"
    num_ping_requests="1"/-->

    <MERGE2 max_interval="100000"
    min_interval="20000"/>
    <FD_SOCK/>
    <FD timeout="10000" max_tries="5"
    shun="true"/>
    <VERIFY_SUSPECT timeout="1500" />
    <pbcast.NAKACK max_xmit_size="60000"
    use_mcast_xmit="false" gc_lag="0"
    retransmit_timeout="300,600,1200,2400,4800"
    discard_delivered_msgs="true"/>
    <pbcast.STABLE stability_delay="1000"
    desired_avg_gossip="50000"
    max_bytes="400000"/>
    <pbcast.GMS print_local_addr="true" join_timeout="3000"
    join_retry_timeout="2000" shun="false"
    view_bundling="true"/>
    <FC max_credits="2000000"
    min_threshold="0.10"/>
    <FRAG2 frag_size="60000" />
    <pbcast.STREAMING_STATE_TRANSFER use_reading_thread="true"/>
    <pbcast.FLUSH timeout="0"/>
    </config>
    </stack>

    </protocol_stacks>

     
  • Paul Ferraro

    Paul Ferraro - 2010-01-20

    Please attach your client code. What exactly are you timing? Just the
    ResultSet.executeQuery(...)? or everything from connection acquisition to
    connection close? Does this include processing of the results?

     

Log in to post a comment.