diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 05b8797163b2b16918d822c4a94bdf0fe58bbaee..14b9494c58ede10c31447148bf8105e2aa59dafe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1785,13 +1785,6 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
-	/*
-	 * Move PageHWPoison flag from head page to the raw error pages,
-	 * which makes any healthy subpages reusable.
-	 */
-	if (unlikely(PageHWPoison(page)))
-		hugetlb_clear_page_hwpoison(page);
-
 	/*
 	 * If vmemmap pages were allocated above, then we need to clear the
 	 * hugetlb destructor under the hugetlb lock.
@@ -1802,6 +1795,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		spin_unlock_irq(&hugetlb_lock);
 	}
 
+	/*
+	 * Move PageHWPoison flag from head page to the raw error pages,
+	 * which makes any healthy subpages reusable.
+	 */
+	if (unlikely(PageHWPoison(page)))
+		hugetlb_clear_page_hwpoison(page);
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		subpage = nth_page(page, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |