1021.lowlatency.patch
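This is a 2.4-series low-latency ("voluntary preemption") patch against linuxmips-2.4.30. It touches mm/filemap.c (the first diff below, whose header is cut off in this copy), mm/memory.c and mm/mmap.c, breaking up long-running loops under pagecache_lock and page_table_lock so that a large truncate, msync, read, write or munmap cannot stall a wakeup for its whole duration. The hunks lean on primitives that the full patch defines elsewhere, in the style of Andrew Morton's 2.4 low-latency patches; they are not part of this excerpt. A minimal sketch of plausible definitions, for orientation only (the exact forms are assumptions reconstructed from how the hunks use them):

	/* Sketch, not from this patch; the full patch would carry these in a
	 * header such as include/linux/sched.h. */
	#define conditional_schedule_needed()	(current->need_resched)

	#define unconditional_schedule()			\
		do {						\
			__set_current_state(TASK_RUNNING);	\
			schedule();				\
		} while (0)

	#define conditional_schedule()				\
		do {						\
			if (conditional_schedule_needed())	\
				unconditional_schedule();	\
		} while (0)

	/* Cheap polling for hot loops: only test need_resched every n
	 * iterations. */
	#define DEFINE_RESCHED_COUNT	int resched_count = 0
	#define TEST_RESCHED_COUNT(n)	(++resched_count > (n))
	#define RESET_RESCHED_COUNT()	do { resched_count = 0; } while (0)

	/* Used by the truncate path; presumably tied to a run-time
	 * low-latency enable in the full patch. */
	#define LOWLATENCY_NEEDED	conditional_schedule_needed()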
 {
 	struct list_head *curr;
 	struct page * page;
@@ -271,6 +281,17 @@
 	while (curr != head) {
 		unsigned long offset;
 
+		if (conditional_schedule_needed() && *restart_count) {
+			(*restart_count)--;
+			list_del(head);
+			list_add(head, curr);	/* Restart on this page */
+			spin_unlock(&pagecache_lock);
+			unconditional_schedule();
+			spin_lock(&pagecache_lock);
+			unlocked = 1;
+			goto restart;
+		}
+
 		page = list_entry(curr, struct page, list);
 		offset = page->index;
 
@@ -303,13 +324,11 @@
 			} else
 				wait_on_page(page);
 
-			page_cache_release(page);
-
-			if (current->need_resched) {
-				__set_current_state(TASK_RUNNING);
-				schedule();
+			if (LOWLATENCY_NEEDED) {
+				*restart_count = 4;	/* We made progress */
 			}
 
+			page_cache_release(page);
 			spin_lock(&pagecache_lock);
 			goto restart;
 		}
@@ -332,13 +351,14 @@
 {
 	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+	int restart_count = 4;
 	int unlocked;
 
 	spin_lock(&pagecache_lock);
 	do {
-		unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
-		unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
-		unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
+		unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial, &restart_count);
+		unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial, &restart_count);
+		unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial, &restart_count);
 	} while (unlocked);
 	/* Traversed all three lists without dropping the lock */
 	spin_unlock(&pagecache_lock);
@@ -483,6 +503,7 @@
 		page_cache_get(page);
 		spin_unlock(&pagecache_lock);
 
+		conditional_schedule();		/* sys_msync() (only used by minixfs, udf) */
 		lock_page(page);
 
 		/* The buffers could have been free'd while we waited for the page lock */
@@ -612,12 +633,14 @@
 		list_del(&page->list);
 		list_add(&page->list, &mapping->locked_pages);
 
-		if (!PageDirty(page))
-			continue;
-
 		page_cache_get(page);
 		spin_unlock(&pagecache_lock);
 
+		conditional_schedule();		/* sys_msync() */
+
+		if (!PageDirty(page))
+			goto clean;
+
 		lock_page(page);
 
 		if (PageDirty(page)) {
@@ -628,7 +651,7 @@
 				ret = err;
 		} else
 			UnlockPage(page);
-
+clean:
 		page_cache_release(page);
 		spin_lock(&pagecache_lock);
 	}
@@ -646,7 +669,8 @@
 int filemap_fdatawait(struct address_space * mapping)
 {
 	int ret = 0;
-
+	DEFINE_RESCHED_COUNT;
+restart:
 	spin_lock(&pagecache_lock);
 
 	while (!list_empty(&mapping->locked_pages)) {
@@ -655,6 +679,17 @@
 		list_del(&page->list);
 		list_add(&page->list, &mapping->clean_pages);
 
+		if (TEST_RESCHED_COUNT(32)) {
+			RESET_RESCHED_COUNT();
+			if (conditional_schedule_needed()) {
+				page_cache_get(page);
+				spin_unlock(&pagecache_lock);
+				unconditional_schedule();
+				page_cache_release(page);
+				goto restart;
+			}
+		}
+
 		if (!PageLocked(page))
 			continue;
 
@@ -764,8 +799,10 @@
 	spin_lock(&pagecache_lock);
 	page = __find_page_nolock(mapping, offset, *hash);
 	spin_unlock(&pagecache_lock);
-	if (page)
+	if (page) {
+		conditional_schedule();
 		return 0;
+	}
 
 	page = page_cache_alloc(mapping);
 	if (!page)
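An aside on the truncate_list_pages() hunks above: the walker may yield the CPU at most *restart_count times before it stops offering, and the budget is refilled to 4 whenever a page is actually processed, so a stream of wakeups cannot livelock a truncate that is making progress. The restart trick is the list_del(head)/list_add(head, curr) pair: the list head itself is moved to just before the current entry, so when the lock is retaken the walk resumes where it stopped instead of rescanning from the top. Distilled into a self-contained sketch (every name here is hypothetical, not from the patch):

	/* Hypothetical distillation of the bounded-restart walk; my_lock and
	 * process_one() are stand-ins for pagecache_lock and the per-page work. */
	static void process_one(struct page *page);

	static void walk_with_restart_budget(struct list_head *head,
					     spinlock_t *my_lock,
					     int *restart_count)
	{
		struct list_head *curr;
	restart:
		spin_lock(my_lock);
		curr = head->next;
		while (curr != head) {
			if (conditional_schedule_needed() && *restart_count) {
				(*restart_count)--;
				list_del(head);
				list_add(head, curr);	/* resume here after the schedule */
				spin_unlock(my_lock);
				unconditional_schedule();
				goto restart;
			}
			process_one(list_entry(curr, struct page, list));
			curr = curr->next;
			*restart_count = 4;	/* progress made: refill the budget */
		}
		spin_unlock(my_lock);
	}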
@@ -1035,6 +1072,11 @@
 	 * the hash-list needs a held write-lock.
 	 */
 repeat:
+	if (conditional_schedule_needed()) {
+		spin_unlock(&pagecache_lock);
+		unconditional_schedule();
+		spin_lock(&pagecache_lock);
+	}
 	page = __find_page_nolock(mapping, offset, hash);
 	if (page) {
 		page_cache_get(page);
@@ -1488,6 +1530,8 @@
 		page_cache_get(page);
 		spin_unlock(&pagecache_lock);
 
+		conditional_schedule();		/* sys_read() */
+
 		if (!Page_Uptodate(page))
 			goto page_not_up_to_date;
 		generic_file_readahead(reada_ok, filp, inode, page);
@@ -2247,6 +2291,12 @@
 		address += PAGE_SIZE;
 		pte++;
 	} while (address && (address < end));
+
+	if (conditional_schedule_needed()) {
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		unconditional_schedule();	/* syncing large mapped files */
+		spin_lock(&vma->vm_mm->page_table_lock);
+	}
 	return error;
 }
 
@@ -2656,7 +2706,9 @@
 	if (vma->vm_flags & VM_LOCKED)
 		return -EINVAL;
 
-	zap_page_range(vma->vm_mm, start, end - start);
+	zap_page_range(vma->vm_mm, start, end - start,
+			ZPR_COND_RESCHED);	/* sys_madvise(MADV_DONTNEED) */
+
 	return 0;
 }
 
@@ -3226,6 +3278,9 @@
 			goto sync_failure;
 		page_fault = __copy_from_user(kaddr+offset, buf, bytes);
 		flush_dcache_page(page);
+
+		conditional_schedule();
+
 		status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
 		if (page_fault)
 			goto fail_write;
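One recurring detail in the filemap.c hunks above, before the mm/memory.c diff: wherever pagecache_lock is dropped around a schedule while a specific page is in hand, the code first takes a reference (page_cache_get()) and releases it after waking up. The reference does not keep the page in the mapping, which is why the walk still restarts, but it does keep the struct page from being freed out from under the sleeping task. As a sketch (hypothetical helper; the real hunks open-code this inline):

	/* Sketch of the hold-a-reference lock break used by filemap_fdatawait()
	 * above; the caller restarts its list walk afterwards. */
	static void lock_break_with_page_ref(struct page *page)
	{
		page_cache_get(page);		/* pin: page can't be freed while we sleep */
		spin_unlock(&pagecache_lock);
		unconditional_schedule();	/* anyone may run, including truncate */
		page_cache_release(page);	/* unpin... */
		spin_lock(&pagecache_lock);	/* ...and the caller re-walks the list */
	}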
diff -Naur --exclude=CVS --exclude='*.o' --exclude='*.a' --exclude='*.so' --exclude='*.elf' --exclude=System.map --exclude=Makefile.d --exclude='*log' --exclude='*log2' --exclude='*~' --exclude='.*~' --exclude='.#*' --exclude='*.bak' --exclude='*.orig' --exclude='*.rej' --exclude='core.[0-9]*' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=vmlinux --exclude=vmlinux.bin --exclude=yamon-02.06-SIGMADESIGNS-01_el.bin linuxmips-2.4.30.ref/mm/memory.c linuxmips-2.4.30/mm/memory.c
--- linuxmips-2.4.30.ref/mm/memory.c	2005-04-05 12:09:58.000000000 -0700
+++ linuxmips-2.4.30/mm/memory.c	2005-07-22 09:36:02.000000000 -0700
@@ -357,7 +357,7 @@
 /*
  * remove user pages in a given range.
  */
-void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
+static void do_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
 {
 	mmu_gather_t *tlb;
 	pgd_t * dir;
@@ -478,6 +478,10 @@
 	struct page *map;
 	while (!(map = follow_page(mm, start, write))) {
 		spin_unlock(&mm->page_table_lock);
+
+		/* Pinning down many physical pages (kiobufs, mlockall) */
+		conditional_schedule();
+
 		switch (handle_mm_fault(mm, vma, start, write)) {
 		case 1:
 			tsk->min_flt++;
@@ -641,6 +645,21 @@
 	iobuf->locked = 0;
 }
 
+#define MAX_ZAP_BYTES 256*PAGE_SIZE
+
+void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, int actions)
+{
+	while (size) {
+		unsigned long chunk = size;
+		if (actions & ZPR_COND_RESCHED && chunk > MAX_ZAP_BYTES)
+			chunk = MAX_ZAP_BYTES;
+		do_zap_page_range(mm, address, chunk);
+		if (actions & ZPR_COND_RESCHED)
+			conditional_schedule();
+		address += chunk;
+		size -= chunk;
+	}
+}
 
 /*
  * Lock down all of the pages of a kiovec for IO.
@@ -750,11 +769,18 @@
 	return 0;
 }
 
-static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
-				     unsigned long size, pgprot_t prot)
+static inline void zeromap_pte_range(struct mm_struct *mm, pte_t * pte,
+				     unsigned long address, unsigned long size,
+				     pgprot_t prot)
 {
 	unsigned long end;
 
+	if (conditional_schedule_needed()) {
+		spin_unlock(&mm->page_table_lock);
+		unconditional_schedule();	/* mmap(/dev/zero) */
+		spin_lock(&mm->page_table_lock);
+	}
+
 	address &= ~PMD_MASK;
 	end = address + size;
 	if (end > PMD_SIZE)
@@ -782,7 +808,7 @@
 		pte_t * pte = pte_alloc(mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
-		zeromap_pte_range(pte, address, end - address, prot);
+		zeromap_pte_range(mm, pte, address, end - address, prot);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
@@ -1017,7 +1043,7 @@
 
 		/* mapping wholly truncated? */
 		if (mpnt->vm_pgoff >= pgoff) {
-			zap_page_range(mm, start, len);
+			zap_page_range(mm, start, len, 0);
 			continue;
 		}
 
@@ -1030,7 +1056,7 @@
 		/* Ok, partially affected.. */
 		start += diff << PAGE_SHIFT;
 		len = (len - diff) << PAGE_SHIFT;
-		zap_page_range(mm, start, len);
+		zap_page_range(mm, start, len, 0);
 	} while ((mpnt = mpnt->vm_next_share) != NULL);
 }
 
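The mm/memory.c diff above carries the other half of the mechanism: the old zap_page_range() becomes the static helper do_zap_page_range(), and a new zap_page_range() wrapper takes an actions argument. Callers that can tolerate latency pass ZPR_COND_RESCHED (sys_munmap(), process exit and madvise(MADV_DONTNEED) in these diffs), and the zap then proceeds in MAX_ZAP_BYTES chunks of 256 pages (1 MiB with 4 KiB pages) with a conditional_schedule() between chunks, so a 64 MiB munmap() gets 64 reschedule opportunities instead of none. Truncate and driver-error paths pass 0 and keep the old single-shot behaviour. ZPR_COND_RESCHED itself is not defined in this excerpt; a plausible header sketch (the placement, the ZPR_NORMAL name and the flag value are assumptions):

	/* Assumed to live in include/linux/mm.h in the full patch: */
	#define ZPR_NORMAL		0		/* zap in one pass, old behaviour */
	#define ZPR_COND_RESCHED	(1 << 0)	/* chunk the zap; offer to reschedule */

	void zap_page_range(struct mm_struct *mm, unsigned long address,
			    unsigned long size, int actions);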
diff -Naur --exclude=CVS --exclude='*.o' --exclude='*.a' --exclude='*.so' --exclude='*.elf' --exclude=System.map --exclude=Makefile.d --exclude='*log' --exclude='*log2' --exclude='*~' --exclude='.*~' --exclude='.#*' --exclude='*.bak' --exclude='*.orig' --exclude='*.rej' --exclude='core.[0-9]*' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=vmlinux --exclude=vmlinux.bin --exclude=yamon-02.06-SIGMADESIGNS-01_el.bin linuxmips-2.4.30.ref/mm/mmap.c linuxmips-2.4.30/mm/mmap.c
--- linuxmips-2.4.30.ref/mm/mmap.c	2005-01-13 02:59:04.000000000 -0800
+++ linuxmips-2.4.30/mm/mmap.c	2005-07-22 09:36:02.000000000 -0700
@@ -600,7 +600,7 @@
 		fput(file);
 	/* Undo any partial mapping done by a device driver.
 	 */
-	zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+	zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start, 0);
 free_vma:
 	kmem_cache_free(vm_area_cachep, vma);
 	return error;
@@ -1000,7 +1000,7 @@
 		remove_shared_vm_struct(mpnt);
 		mm->map_count--;
 
-		zap_page_range(mm, st, size);
+		zap_page_range(mm, st, size, ZPR_COND_RESCHED);	/* sys_munmap() */
 
 		/*
 		 * Fix the mapping, and free the old area if it wasn't reused.
@@ -1175,7 +1175,7 @@
 		}
 		mm->map_count--;
 		remove_shared_vm_struct(mpnt);
-		zap_page_range(mm, start, size);
+		zap_page_range(mm, start, size, ZPR_COND_RESCHED);	/* sys_exit() */
 		if (mp