Index: linux/arch/ia64/mm/hugetlbpage.c
===================================================================
--- linux.orig/arch/ia64/mm/hugetlbpage.c	2004-09-15 15:25:16.000000000 -0700
+++ linux/arch/ia64/mm/hugetlbpage.c	2004-09-15 15:59:39.000000000 -0700
@@ -87,6 +87,35 @@
 	return page;
 }
 
+/*
+ * Variation on __alloc_hugetlb_page() above: acquires htlbpage_lock
+ * itself, as in 2.6.9-rc2, so the caller need not hold it.
+ */
+static struct page *
+__alloc_hugetlb_page2(struct vm_area_struct *vma, unsigned long addr)
+{
+	int i;
+	struct page *page;
+
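+	/* htlbpage_lock protects the hugepage free list and htlbpagemem. */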
+	spin_lock(&htlbpage_lock);
+	page = dequeue_huge_page(vma, addr);
+	if (!page) {
+		spin_unlock(&htlbpage_lock);
+		return NULL;
+	}
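+	/* One fewer free huge page on the node this page came from. */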
+	htlbpagemem[page_zone(page)->zone_pgdat->node_id]--;
+	spin_unlock(&htlbpage_lock);
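+	/* Give the caller a reference and stash the hugepage destructor. */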
+	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
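+	/* Zero the huge page one base page at a time, outside the lock. */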
+	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
+		clear_highpage(&page[i]);
+	return page;
+}
+
 static pte_t *
 huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
@@ -666,22 +695,19 @@
  retry:
 	page = find_get_page(mapping, idx);
 	if (!page) {
-		spin_lock(&htlbpage_lock);
 
 		/* Should do this at prefault time, but that gets us into
 		   trouble with freeing right now. We do a quick overcommit 
 		   check instead. */
 		ret = hugetlb_get_quota(mapping);
 		if (ret) {
-			spin_unlock(&htlbpage_lock);
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
 		
-		page = __alloc_hugetlb_page(vma, addr);
+		page = __alloc_hugetlb_page2(vma, addr);
 		if (!page) {
 			hugetlb_put_quota(mapping);
-			spin_unlock(&htlbpage_lock);
 			
 			/* Instead of OOMing here could just transparently use
 			   small pages. */
@@ -698,13 +724,11 @@
 			hugetlb_put_quota(mapping);
 			if (put_page_testzero(page))
 				__free_huge_page(page);
-			spin_unlock(&htlbpage_lock);
 			if (ret == -EEXIST)
 				goto retry;
 			ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
-		spin_unlock(&htlbpage_lock);
 		ret = VM_FAULT_MAJOR; 
 	} else
 		ret = VM_FAULT_MINOR;