Reposted from: https://blog.csdn.net/21cnbao/article/details/7309757

On ARM and other embedded Linux systems, a perennial headache is that the GPU, camera, HDMI and similar devices all need large contiguous memory regions. This memory sits unused most of the time, yet the conventional approach has to reserve it up front anyway. Marek Szyprowski and Michal Nazarewicz have now implemented a new Contiguous Memory Allocator (CMA). With this mechanism, nothing needs to be reserved permanently: the memory stays available for normal use and is only handed to the camera, HDMI, etc. when actually needed. The basic code flow is analyzed below.

Declaring contiguous memory

During kernel boot, arm_memblock_init() in arch/arm/mm/init.c calls dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

This function lives in drivers/base/dma-contiguous.c:

    /**
     * dma_contiguous_reserve() - reserve area for contiguous memory handling
     * @limit: End address of the reserved memory (optional, 0 for any).
     *
     * This function reserves memory from early allocator. It should be
     * called by arch specific code once the early allocator (memblock or bootmem)
     * has been activated and all other subsystems have already allocated/reserved
     * memory.
     */
    void __init dma_contiguous_reserve(phys_addr_t limit)
    {
        unsigned long selected_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
            selected_size = size_cmdline;
        } else {
    #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
            selected_size = size_bytes;
    #elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
            selected_size = cma_early_percent_memory();
    #elif defined(CONFIG_CMA_SIZE_SEL_MIN)
            selected_size = min(size_bytes, cma_early_percent_memory());
    #elif defined(CONFIG_CMA_SIZE_SEL_MAX)
            selected_size = max(size_bytes, cma_early_percent_memory());
    #endif
        }

        if (selected_size) {
            pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                     selected_size / SZ_1M);
            dma_declare_contiguous(NULL, selected_size, 0, limit);
        }
    }

Here size_bytes is defined as:

    static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;

By default, CMA_SIZE_MBYTES is 16 MB, coming from CONFIG_CMA_SIZE_MBYTES=16.
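The Kconfig default can also be overridden at boot time: dma-contiguous.c registers an early parameter, early_param("cma", early_cma), which parses a size into size_cmdline via memparse(). So, assuming a kernel of this generation, appending the following to the boot arguments reserves a 64 MiB global CMA area instead:

    cma=64M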

->

    int __init dma_declare_contiguous(struct device *dev, unsigned long size,
                                      phys_addr_t base, phys_addr_t limit)
    {
        ...
        /* Reserve memory */
        if (base) {
            if (memblock_is_region_reserved(base, size) ||
                memblock_reserve(base, size) < 0) {
                base = -EBUSY;
                goto err;
            }
        } else {
            /*
             * Use __memblock_alloc_base() since
             * memblock_alloc_base() panic()s.
             */
            phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
            if (!addr) {
                base = -ENOMEM;
                goto err;
            } else if (addr + size > ~(unsigned long)0) {
                memblock_free(addr, size);
                base = -EINVAL;
                goto err;
            } else {
                base = addr;
            }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        r->start = base;
        r->size = size;
        r->dev = dev;
        cma_reserved_count++;
        pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
                (unsigned long)base);

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
    err:
        pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
        return base;
    }

As can be seen, the contiguous memory area is likewise grabbed early in kernel boot, via __memblock_alloc_base().
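The same API can reserve a region for one specific device rather than the global pool. A minimal sketch, assuming a hypothetical camera platform device in a board file of this kernel generation (the call must happen while memblock is still active, e.g. from the machine's reserve hook):

    #include <linux/dma-contiguous.h>

    static struct platform_device s_camera_device;  /* hypothetical device */

    static void __init board_reserve(void)
    {
        /*
         * Give the camera its own 32 MiB CMA area anywhere in memory
         * (base = 0, limit = 0); dev == NULL would size the global area instead.
         */
        if (dma_declare_contiguous(&s_camera_device.dev, 32 * SZ_1M, 0, 0))
            pr_err("camera: CMA reservation failed\n");
    }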

In addition:

The core_initcall() in drivers/base/dma-contiguous.c causes cma_init_reserved_areas() to be called:

    static int __init cma_init_reserved_areas(void)
    {
        struct cma_reserved *r = cma_reserved;
        unsigned i = cma_reserved_count;

        pr_debug("%s()\n", __func__);

        for (; i; --i, ++r) {
            struct cma *cma;
            cma = cma_create_area(PFN_DOWN(r->start),
                                  r->size >> PAGE_SHIFT);
            if (!IS_ERR(cma))
                dev_set_cma_area(r->dev, cma);
        }
        return 0;
    }
    core_initcall(cma_init_reserved_areas);

cma_create_area() calls cma_activate_area(), which in turn calls, for each pageblock in the area:

init_cma_reserved_pageblock(pfn_to_page(base_pfn));

This function marks the pages as MIGRATE_CMA via set_pageblock_migratetype(page, MIGRATE_CMA):

    #ifdef CONFIG_CMA
    /* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
    void __init init_cma_reserved_pageblock(struct page *page)
    {
        unsigned i = pageblock_nr_pages;
        struct page *p = page;

        do {
            __ClearPageReserved(p);
            set_page_count(p, 0);
        } while (++p, --i);

        set_page_refcounted(page);
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
    }
    #endif

The __free_pages(page, pageblock_order) call it makes eventually reaches __free_one_page(page, zone, order, migratetype), and the pages end up on the MIGRATE_CMA free_list:

list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
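For context, each zone's buddy free_area keeps one free list per migration type (as defined in include/linux/mmzone.h of this era), so MIGRATE_CMA pages sit on their own list inside the ordinary buddy structures:

    struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long    nr_free;
    };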

Allocating contiguous memory

Contiguous memory is still allocated through the standard dma_alloc_coherent() and dma_alloc_writecombine() defined in arch/arm/mm/dma-mapping.c; both end up calling, in drivers/base/dma-contiguous.c:

    struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                           unsigned int align)

->

    struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                           unsigned int align)
    {
        ...
        for (;;) {
            pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                start, count, mask);
            if (pageno >= cma->count) {
                ret = -ENOMEM;
                goto error;
            }

            pfn = cma->base_pfn + pageno;
            ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
            if (ret == 0) {
                bitmap_set(cma->bitmap, pageno, count);
                break;
            } else if (ret != -EBUSY) {
                goto error;
            }
            pr_debug("%s(): memory range at %p is busy, retrying\n",
                     __func__, pfn_to_page(pfn));
            /* try again with a bit different memory target */
            start = pageno + mask + 1;
        }
        ...
    }

->

    int alloc_contig_range(unsigned long start, unsigned long end,
                           unsigned migratetype)

The pages in the range must first be isolated; the purpose of the isolation is explained by the comment in the code:

    /*
     * What we do here is we mark all pageblocks in range as
     * MIGRATE_ISOLATE. Because of the way page allocator work, we
     * align the range to MAX_ORDER pages so that page allocator
     * won't try to merge buddies from different pageblocks and
     * change MIGRATE_ISOLATE to some other migration type.
     *
     * Once the pageblocks are marked as MIGRATE_ISOLATE, we
     * migrate the pages from an unaligned range (ie. pages that
     * we are interested in). This will put all the pages in
     * range back to page allocator as MIGRATE_ISOLATE.
     *
     * When this is done, we take the pages in range from page
     * allocator removing them from the buddy system. This way
     * page allocator will never consider using them.
     *
     * This lets us mark the pageblocks back as
     * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
     * MAX_ORDER aligned range but not in the unaligned, original
     * range are put back to page allocator so that buddy can use
     * them.
     */
    ret = start_isolate_page_range(pfn_align_to_maxpage_down(start),
                                   pfn_align_to_maxpage_up(end),
                                   migratetype);

Put simply, the relevant pages are marked MIGRATE_ISOLATE so that the buddy system stops using them.

    /*
     * start_isolate_page_range() -- make page-allocation-type of range of pages
     * to be MIGRATE_ISOLATE.
     * @start_pfn: The lower PFN of the range to be isolated.
     * @end_pfn: The upper PFN of the range to be isolated.
     * @migratetype: migrate type to set in error recovery.
     *
     * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
     * the range will never be allocated. Any free pages and pages freed in the
     * future will not be allocated again.
     *
     * start_pfn/end_pfn must be aligned to pageblock_order.
     * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
     */
    int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                                 unsigned migratetype)
    {
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
            page = __first_valid_page(pfn, pageblock_nr_pages);
            if (page && set_migratetype_isolate(page)) {
                undo_pfn = pfn;
                goto undo;
            }
        }
        return 0;
    undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
            unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
    }

Next, __alloc_contig_migrate_range() is called to isolate the in-use pages and migrate them out of the range:

    static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
    {
        /* This function is based on compact_zone() from compaction.c. */
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;
        struct compact_control cc = {
            .nr_migratepages = 0,
            .order = -1,
            .zone = page_zone(pfn_to_page(start)),
            .sync = true,
        };

        INIT_LIST_HEAD(&cc.migratepages);
        migrate_prep_local();

        while (pfn < end || !list_empty(&cc.migratepages)) {
            if (fatal_signal_pending(current)) {
                ret = -EINTR;
                break;
            }

            if (list_empty(&cc.migratepages)) {
                cc.nr_migratepages = 0;
                pfn = isolate_migratepages_range(cc.zone, &cc,
                                                 pfn, end);
                if (!pfn) {
                    ret = -EINTR;
                    break;
                }
                tries = 0;
            } else if (++tries == 5) {
                ret = ret < 0 ? ret : -EBUSY;
                break;
            }

            ret = migrate_pages(&cc.migratepages,
                                __alloc_contig_migrate_alloc,
                                0, false, true);
        }

        putback_lru_pages(&cc.migratepages);
        return ret > 0 ? 0 : ret;
    }

migrate_pages() does the actual migration: along the way, the __alloc_contig_migrate_alloc() callback passed in allocates new pages, and the contents of the old pages are copied over to them:

    int migrate_pages(struct list_head *from,
                      new_page_t get_new_page, unsigned long private, bool offlining,
                      bool sync)
    {
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
            current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
            retry = 0;
            list_for_each_entry_safe(page, page2, from, lru) {
                cond_resched();

                rc = unmap_and_move(get_new_page, private,
                                    page, pass > 2, offlining,
                                    sync);

                switch (rc) {
                case -ENOMEM:
                    goto out;
                case -EAGAIN:
                    retry++;
                    break;
                case 0:
                    break;
                default:
                    /* Permanent failure */
                    nr_failed++;
                    break;
                }
            }
        }
        rc = 0;
        ...
    }

The key piece is unmap_and_move(), defined in mm/migrate.c:

    /*
     * Obtain the lock on page, remove all ptes and migrate the page
     * to the newly allocated page in newpage.
     */
    static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                              struct page *page, int force, bool offlining, bool sync)
    {
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int remap_swapcache = 1;
        int charge = 0;
        struct mem_cgroup *mem = NULL;
        struct anon_vma *anon_vma = NULL;
        ...
        /* charge against new page */
        charge = mem_cgroup_prepare_migration(page, newpage, &mem);
        ...
        if (PageWriteback(page)) {
            if (!force || !sync)
                goto uncharge;
            wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrates a page.
         * This get_anon_vma() delays freeing anon_vma pointer until the end
         * of migration. File cache pages are no problem because of page_lock()
         * File Caches may use write_page() or lock_page() in migration, then,
         * just care Anon page here.
         */
        if (PageAnon(page)) {
            /*
             * Only page_lock_anon_vma() understands the subtleties of
             * getting a hold on an anon_vma from outside one of its mms.
             */
            anon_vma = page_lock_anon_vma(page);
            if (anon_vma) {
                /*
                 * Take a reference count on the anon_vma if the
                 * page is mapped so that it is guaranteed to
                 * exist when the page is remapped later
                 */
                get_anon_vma(anon_vma);
                page_unlock_anon_vma(anon_vma);
            } else if (PageSwapCache(page)) {
                /*
                 * We cannot be sure that the anon_vma of an unmapped
                 * swapcache page is safe to use because we don't
                 * know in advance if the VMA that this page belonged
                 * to still exists. If the VMA and others sharing the
                 * data have been freed, then the anon_vma could
                 * already be invalid.
                 *
                 * To avoid this possibility, swapcache pages get
                 * migrated but are not remapped when migration
                 * completes
                 */
                remap_swapcache = 0;
            } else {
                goto uncharge;
            }
        }
        ...
        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

    skip_unmap:
        if (!page_mapped(page))
            rc = move_to_new_page(newpage, page, remap_swapcache);

        if (rc && remap_swapcache)
            remove_migration_ptes(page, page);

        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
            drop_anon_vma(anon_vma);

    uncharge:
        if (!charge)
            mem_cgroup_end_migration(mem, page, newpage, rc == 0);
    unlock:
        unlock_page(page);

    move_newpage:
        ...
    }

Through unmap_and_move(), the old pages are thus migrated over to the new ones.

The next step is to reclaim some pages, so that carving out the contiguous chunk does not leave the system starved for memory:

->

    /*
     * Reclaim enough pages to make sure that contiguous allocation
     * will not starve the system.
     */
    __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);

->

    /*
     * Trigger memory pressure bump to reclaim some pages in order to be able to
     * allocate 'count' pages in single page units. Does similar work as
     * __alloc_pages_slowpath() function.
     */
    static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
    {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zonelist *zonelist = node_zonelist(0, gfp_mask);
        int did_some_progress = 0;
        int order = 1;
        unsigned long watermark;

        /*
         * Increase level of watermarks to force kswapd do his job
         * to stabilise at new watermark level.
         */
        __update_cma_watermarks(zone, count);

        /* Obey watermarks as if the page was being allocated */
        watermark = low_wmark_pages(zone) + count;
        while (!zone_watermark_ok(zone, 0, watermark, 0, 0)) {
            wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));

            did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
                                                  NULL);
            if (!did_some_progress) {
                /* Exhausted what can be done so it's blamo time */
                out_of_memory(zonelist, gfp_mask, order, NULL);
            }
        }

        /* Restore original watermark levels. */
        __update_cma_watermarks(zone, -count);

        return count;
    }

Freeing contiguous memory

Freeing the memory is straightforward:

arch/arm/mm/dma-mapping.c:

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)

->

arch/arm/mm/dma-mapping.c:

    static void __free_from_contiguous(struct device *dev, struct page *page,
                                       size_t size)
    {
        __dma_remap(page, size, pgprot_kernel);
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
    }

->

    bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                     int count)
    {
        ...
        free_contig_range(pfn, count);
        ...
    }

->

    void free_contig_range(unsigned long pfn, unsigned nr_pages)
    {
        for (; nr_pages--; ++pfn)
            __free_page(pfn_to_page(pfn));
    }

which hands the pages back to the buddy system.

The migratetype of kernel memory allocations

Kernel memory allocations carry GFP_ flags, and these can be converted into a migratetype:

    static inline int allocflags_to_migratetype(gfp_t gfp_flags)
    {
        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

        if (unlikely(page_group_by_mobility_disabled))
            return MIGRATE_UNMOVABLE;

        /* Group based on mobility */
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
    }
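Working the bit arithmetic through (assuming the migratetype enum ordering of this kernel generation, where MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2):

    allocflags_to_migratetype(GFP_KERNEL);                    /* no mobility bits -> 0 (MIGRATE_UNMOVABLE)   */
    allocflags_to_migratetype(GFP_NOFS | __GFP_RECLAIMABLE);  /* -> 1 (MIGRATE_RECLAIMABLE)                  */
    allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE);          /* __GFP_MOVABLE set -> 2 (MIGRATE_MOVABLE)    */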

Later, when memory is allocated, the free_list matching the requested migrate type is consulted:

    page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                                  zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
                                  preferred_zone, migratetype);
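This is also what keeps CMA memory usable in normal operation: with CONFIG_CMA, the buddy allocator's fallback table lets MIGRATE_MOVABLE requests spill into MIGRATE_CMA pageblocks. Roughly, in mm/page_alloc.c of this era (consult the exact kernel version for the authoritative table):

    static int fallbacks[MIGRATE_TYPES][4] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    #ifdef CONFIG_CMA
        [MIGRATE_MOVABLE]     = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
    #else
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    #endif
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
    };

Because only movable allocations can land in a CMA pageblock, any page occupying a requested range can later be migrated away, which is what makes alloc_contig_range() reliable.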

The author also wrote a small test module for exercising CMA at will:

    /*
     * kernel module helper for testing CMA
     *
     * Licensed under GPLv2 or later.
     */

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/dma-mapping.h>

    #define CMA_NUM 10
    static struct device *cma_dev;
    static dma_addr_t dma_phys[CMA_NUM];
    static void *dma_virt[CMA_NUM];

    /* any read request will free coherent memory, eg.
     * cat /dev/cma_test
     */
    static ssize_t
    cma_test_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
    {
        int i;

        for (i = 0; i < CMA_NUM; i++) {
            if (dma_virt[i]) {
                dma_free_coherent(cma_dev, (i + 1) * SZ_1M, dma_virt[i], dma_phys[i]);
                _dev_info(cma_dev, "free virt: %p phys: %p\n", dma_virt[i], (void *)dma_phys[i]);
                dma_virt[i] = NULL;
                break;
            }
        }
        return 0;
    }

    /*
     * any write request will alloc coherent memory, eg.
     * echo 0 > /dev/cma_test
     */
    static ssize_t
    cma_test_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
    {
        int i;
        int ret;

        for (i = 0; i < CMA_NUM; i++) {
            if (!dma_virt[i]) {
                dma_virt[i] = dma_alloc_coherent(cma_dev, (i + 1) * SZ_1M, &dma_phys[i], GFP_KERNEL);

                if (dma_virt[i]) {
                    void *p;
                    /* touch every page in the allocated memory */
                    for (p = dma_virt[i]; p < dma_virt[i] + (i + 1) * SZ_1M; p += PAGE_SIZE)
                        *(u32 *)p = 0;
                    _dev_info(cma_dev, "alloc virt: %p phys: %p\n", dma_virt[i], (void *)dma_phys[i]);
                } else {
                    dev_err(cma_dev, "no mem in CMA area\n");
                    ret = -ENOMEM;
                }
                break;
            }
        }

        return count;
    }

    static const struct file_operations cma_test_fops = {
        .owner = THIS_MODULE,
        .read  = cma_test_read,
        .write = cma_test_write,
    };

    static struct miscdevice cma_test_misc = {
        .name = "cma_test",
        .fops = &cma_test_fops,
    };

    static int __init cma_test_init(void)
    {
        int ret = 0;

        ret = misc_register(&cma_test_misc);
        if (unlikely(ret)) {
            pr_err("failed to register cma test misc device!\n");
            return ret;
        }
        cma_dev = cma_test_misc.this_device;
        cma_dev->coherent_dma_mask = ~0;
        _dev_info(cma_dev, "registered.\n");

        return ret;
    }
    module_init(cma_test_init);

    static void __exit cma_test_exit(void)
    {
        misc_deregister(&cma_test_misc);
    }
    module_exit(cma_test_exit);

    MODULE_LICENSE("GPL");
    MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
    MODULE_DESCRIPTION("kernel module to help the test of CMA");
    MODULE_ALIAS("CMA test");

To allocate memory:

# echo 0 > /dev/cma_test

To free memory:

# cat /dev/cma_test
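As a quick sanity check (assuming CONFIG_CMA is enabled in a kernel of this generation, where the CMA migratetype appears in the per-zone statistics), the free-page counts per migration type, including the CMA lists, can be inspected through /proc/pagetypeinfo:

# grep -i cma /proc/pagetypeinfo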

