Date:      Mon, 23 Jan 2006 15:24:15 +0800
From:      Huang wen hui <hwh@gddsn.org.cn>
To:        Michael Vince <mv@roq.com>
Cc:        java@freebsd.org
Subject:   Re: Performance patch for jdk1.5.0/amd64
Message-ID:  <43D4849F.3010706@gddsn.org.cn>
In-Reply-To: <43D45439.8020309@roq.com>
References:  <43C92741.7000803@gddsn.org.cn> <43D45439.8020309@roq.com>


Michael Vince wrote:

> Hi,
> Do you have this as an attached file? I tried to use this patch but it 
> appears to be mangled.
>
> Regards,
> Mike
>
Please check the attached patches.

[Attachment: amd64.ad.patch]

--- ../../hotspot/src/cpu/amd64/vm/amd64.ad.orig	Sat Jan 14 20:06:02 2006
+++ ../../hotspot/src/cpu/amd64/vm/amd64.ad	Sat Jan 14 20:05:37 2006
@@ -6095,6 +6095,18 @@
   ins_pipe(pipe_slow); // XXX
 %}
 
+instruct prefetcht0(memory mem)
+%{
+  match(Prefetch mem);
+  predicate(!VM_Version::has_prefetchw());
+  ins_cost(125);
+
+  format %{ "prefetcht0 $mem\t# prefetch into L1" %}
+  opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
+  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
+  ins_pipe(pipe_slow);
+%}
+
 instruct prefetch(memory mem)
 %{
   match(Prefetch mem);

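For reference: the new instruct encodes prefetcht0 as opcode 0F 18 with /1 in the ModRM reg field, using the same (base,index,1) addressing form as the inline asm in the second patch. Below is a minimal standalone sketch of that form, assuming a gcc-compatible compiler on amd64; the helper name and test buffer are illustrative only, not part of the patch.

// Illustrative only, not part of the patch: issue the same base+index
// prefetcht0 (opcode 0F 18 /1) that the new ADL rule emits.
#include <cstdint>
#include <cstdio>

static inline void prefetch_read(const void* loc, intptr_t interval)
{
  // Hint the cache line at loc + interval into all cache levels (T0 hint).
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
}

int main()
{
  static uint8_t buf[4096];
  prefetch_read(buf, 64);   // warm the next cache line before touching it
  std::printf("prefetched %p\n", static_cast<void*>(buf + 64));
  return 0;
}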
[Attachment: prefetch_bsd_amd64.inline.hpp.patch]

--- ../../hotspot/src/os_cpu/bsd_amd64/vm/prefetch_bsd_amd64.inline.hpp.orig	Sat Jan 14 23:51:41 2006
+++ ../../hotspot/src/os_cpu/bsd_amd64/vm/prefetch_bsd_amd64.inline.hpp	Sat Jan 14 23:52:54 2006
@@ -6,24 +6,14 @@
  * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
  */
 
-inline void Prefetch::read(void* loc, intx interval) 
+inline void Prefetch::read(void* loc, intx interval)
 {
-  __builtin_prefetch((char*) loc + interval, 0); // prefetcht0 (%rsi, %rdi,1)
+  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
 }
 
 inline void Prefetch::write(void* loc, intx interval)
 {
-  // Force prefetchw.  The gcc builtin produces prefetcht0 or prefetchw
-  // depending on command line switches we don't control here.
-  // Use of this method should be gated by VM_Version::has_prefetchw.
-  /*
-   * How do we invoke VM_Version::has_prefetchw here?
-   * Can we do something at compile time instead to remove that overhead?
-   */
-//#ifdef __amd64__
-//  __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
-//#elif __em64t__
+  // Do not use the 3dnow prefetchw instruction.  It isn't supported on em64t.
+  //  __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
   __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
-//#endif
-  //  __builtin_prefetch((char*) loc + interval, 1); // prefetcht0/prefetchw (%rsi,%rdi,1)
 }

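The point of the hpp change is to pin the instruction choice instead of leaving it to the compiler: __builtin_prefetch((char*) loc + interval, 1) lets gcc emit prefetcht0 or prefetchw depending on -march switches the VM build does not control, while the explicit asm always emits prefetcht0, which is safe on em64t. A rough side-by-side sketch, assuming a gcc-compatible compiler on amd64; the function names are illustrative, not from the patch.

#include <cstdint>

// Compiler-chosen form: with switches enabling 3dnow, gcc may lower the
// write hint to prefetchw, which the patch notes is unsupported on em64t.
static inline void prefetch_write_builtin(void* loc, intptr_t interval)
{
  __builtin_prefetch((char*) loc + interval, 1);  // 1 = prefetch for write
}

// Pinned form, matching the patched Prefetch::write(): always prefetcht0.
static inline void prefetch_write_pinned(void* loc, intptr_t interval)
{
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
}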