linux内核奇遇记之md源代码解读之十raid5数据流之同步数据流程
- 3379 static void handle_stripe(struct stripe_head *sh)
- 3380 {
- 3381 struct stripe_head_state s;
- 3382 struct r5conf *conf = sh->raid_conf;
- 3383 int i;
- 3384 int prexor;
- 3385 int disks = sh->disks;
- 3386 struct r5dev *pdev, *qdev;
- 3387
- 3388 clear_bit(STRIPE_HANDLE, &sh->state);
- 3389 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
- 3390 /* already being handled, ensure it gets handled
- 3391 * again when current action finishes */
- 3392 set_bit(STRIPE_HANDLE, &sh->state);
- 3393 return;
- 3394 }
- 3395
- 3396 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
- 3397 set_bit(STRIPE_SYNCING, &sh->state);
- 3398 clear_bit(STRIPE_INSYNC, &sh->state);
- 3399 }
- 3400 clear_bit(STRIPE_DELAYED, &sh->state);
- 3401
- 3402 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
- 3403 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
- 3404 (unsigned long long)sh->sector, sh->state,
- 3405 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
- 3406 sh->check_state, sh->reconstruct_state);
- 3407
- 3408 analyse_stripe(sh, &s);
- enum {
- STRIPE_ACTIVE, // stripe is currently being handled
- STRIPE_HANDLE, // stripe needs handling
- STRIPE_SYNC_REQUESTED, // a sync has been requested for this stripe
- STRIPE_SYNCING, // sync is in progress on this stripe
- STRIPE_INSYNC, // stripe is in sync
- STRIPE_PREREAD_ACTIVE, // preread active
- STRIPE_DELAYED, // handling of this stripe is delayed
- STRIPE_DEGRADED, // stripe is degraded
- STRIPE_BIT_DELAY, // waiting on bitmap handling
- STRIPE_EXPANDING, //
- STRIPE_EXPAND_SOURCE, //
- STRIPE_EXPAND_READY, //
- STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */ // IO has been issued
- STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */ // full-stripe write
- STRIPE_BIOFILL_RUN, // bio fill running: copy stripe pages into the bio
- STRIPE_COMPUTE_RUN, // a compute operation is running
- STRIPE_OPS_REQ_PENDING, // used to queue handle_stripe work
- STRIPE_ON_UNPLUG_LIST, // marks whether the stripe was put on the unplug list during batched release_stripe
- };
- 3198 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
- 3199 {
- 3200 struct r5conf *conf = sh->raid_conf;
- 3201 int disks = sh->disks;
- 3202 struct r5dev *dev;
- 3203 int i;
- 3204 int do_recovery = 0;
- 3205
- 3206 memset(s, 0, sizeof(*s));
- 3207
- 3208 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- 3209 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
- 3210 s->failed_num[0] = -1;
- 3211 s->failed_num[1] = -1;
- 3212
- 3213 /* Now to look around and see what can be done */
- 3214 rcu_read_lock();
- 3215 for (i=disks; i--; ) {
- 3216 struct md_rdev *rdev;
- 3217 sector_t first_bad;
- 3218 int bad_sectors;
- 3219 int is_bad = 0;
- 3220
- 3221 dev = &sh->dev[i];
- 3222
- 3223 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- 3224 i, dev->flags,
- 3225 dev->toread, dev->towrite, dev->written);
- struct r5dev {
- /* rreq and rvec are used for the replacement device when
- * writing data to both devices.
- */
- struct bio req, rreq; /* embedded bios: req targets the member device, rreq the replacement */
- struct bio_vec vec, rvec; /* single bio_vecs backing req and rreq respectively */
- struct page *page; /* data page this device contributes to the stripe */
- struct bio *toread, *read, *towrite, *written; /* bio chains pending/in flight against this page */
- sector_t sector; /* sector of this page */
- unsigned long flags; /* per-device R5_* state bits (R5_UPTODATE, R5_LOCKED, ...) */
- } dev[1]; /* allocated with extra space depending of RAID geometry */
- 3226 /* maybe we can reply to a read
- 3227 *
- 3228 * new wantfill requests are only permitted while
- 3229 * ops_complete_biofill is guaranteed to be inactive
- 3230 */
- 3231 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
- 3232 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
- 3233 set_bit(R5_Wantfill, &dev->flags);
- 3234
- 3235 /* now count some things */
- 3236 if (test_bit(R5_LOCKED, &dev->flags))
- 3237 s->locked++;
- 3238 if (test_bit(R5_UPTODATE, &dev->flags))
- 3239 s->uptodate++;
- 3240 if (test_bit(R5_Wantcompute, &dev->flags)) {
- 3241 s->compute++;
- 3242 BUG_ON(s->compute > 2);
- 3243 }
- 3244
- 3245 if (test_bit(R5_Wantfill, &dev->flags))
- 3246 s->to_fill++;
- 3247 else if (dev->toread)
- 3248 s->to_read++;
- 3249 if (dev->towrite) {
- 3250 s->to_write++;
- 3251 if (!test_bit(R5_OVERWRITE, &dev->flags))
- 3252 s->non_overwrite++;
- 3253 }
- 3254 if (dev->written)
- 3255 s->written++;
- 3256 /* Prefer to use the replacement for reads, but only
- 3257 * if it is recovered enough and has no bad blocks.
- 3258 */
- 3259 rdev = rcu_dereference(conf->disks[i].replacement);
- 3260 if (rdev && !test_bit(Faulty, &rdev->flags) &&
- 3261 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
- 3262 !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
- 3263 &first_bad, &bad_sectors))
- 3264 set_bit(R5_ReadRepl, &dev->flags);
- 3265 else {
- 3266 if (rdev)
- 3267 set_bit(R5_NeedReplace, &dev->flags);
- 3268 rdev = rcu_dereference(conf->disks[i].rdev);
- 3269 clear_bit(R5_ReadRepl, &dev->flags);
- 3270 }
- 3271 if (rdev && test_bit(Faulty, &rdev->flags))
- 3272 rdev = NULL;
- 3273 if (rdev) {
- 3274 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
- 3275 &first_bad, &bad_sectors);
- 3276 if (s->blocked_rdev == NULL
- 3277 && (test_bit(Blocked, &rdev->flags)
- 3278 || is_bad < 0)) {
- 3279 if (is_bad < 0)
- 3280 set_bit(BlockedBadBlocks,
- 3281 &rdev->flags);
- 3282 s->blocked_rdev = rdev;
- 3283 atomic_inc(&rdev->nr_pending);
- 3284 }
- 3285 }
- 3468 /* Now we might consider reading some blocks, either to check/generate
- 3469 * parity, or to satisfy requests
- 3470 * or to load a block that is being partially written.
- 3471 */
- 3472 if (s.to_read || s.non_overwrite
- 3473 || (conf->level == 6 && s.to_write && s.failed)
- 3474 || (s.syncing && (s.uptodate + s.compute < disks))
- 3475 || s.replacing
- 3476 || s.expanding)
- 3477 handle_stripe_fill(sh, &s, disks);
- 2707 /**
- 2708 * handle_stripe_fill - read or compute data to satisfy pending requests.
- * @sh:    stripe to fill
- * @s:     per-invocation analysis state produced by analyse_stripe()
- * @disks: number of member devices in the stripe
- *
- * Walks every member device and lets fetch_block() decide, per device,
- * whether a read or a compute must be scheduled. Always re-queues the
- * stripe by setting STRIPE_HANDLE before returning.
- 2709 */
- 2710 static void handle_stripe_fill(struct stripe_head *sh,
- 2711 struct stripe_head_state *s,
- 2712 int disks)
- 2713 {
- 2714 int i;
- 2715
- 2716 /* look for blocks to read/compute, skip this if a compute
- 2717 * is already in flight, or if the stripe contents are in the
- 2718 * midst of changing due to a write
- 2719 */
- /* fetch_block() returns 1 when no further devices need checking,
-  * which terminates the scan early. */
- 2720 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- 2721 !sh->reconstruct_state)
- 2722 for (i = disks; i--; )
- 2723 if (fetch_block(sh, s, i, disks))
- 2724 break;
- 2725 set_bit(STRIPE_HANDLE, &sh->state);
- 2726 }
2619 * to be read or computed to satisfy a request.
2620 *
2621 * Returns 1 when no more member devices need to be checked, otherwise returns
2622 * 0 to tell the loop in handle_stripe_fill to continue
2623 */
- 2624 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
- 2625 int disk_idx, int disks)
- 2626 {
- 2627 struct r5dev *dev = &sh->dev[disk_idx];
- 2628 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
- 2629 &sh->dev[s->failed_num[1]] };
- /* NOTE: analyse_stripe() initializes failed_num[] to -1, so fdev[]
-  * may point before sh->dev[]; it is only dereferenced behind the
-  * s->failed >= 1 / >= 2 checks below. */
- 2630
- 2631 /* is the data in this block needed, and can we get it? */
- /* Needed when: a bio reads it; a partial (non-overwrite) write needs
-  * the old data; sync/expand/replace is in progress; or it is required
-  * to serve requests against a failed device. */
- 2632 if (!test_bit(R5_LOCKED, &dev->flags) &&
- 2633 !test_bit(R5_UPTODATE, &dev->flags) &&
- 2634 (dev->toread ||
- 2635 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- 2636 s->syncing || s->expanding ||
- 2637 (s->replacing && want_replace(sh, disk_idx)) ||
- 2638 (s->failed >= 1 && fdev[0]->toread) ||
- 2639 (s->failed >= 2 && fdev[1]->toread) ||
- 2640 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
- 2641 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
- 2642 (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
- 2643 /* we would like to get this block, possibly by computing it,
- 2644 * otherwise read it if the backing disk is insync
- 2645 */
- 2646 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
- 2647 BUG_ON(test_bit(R5_Wantread, &dev->flags));
- /* Case 1: every other block is up to date and this is a failed
-  * device -- reconstruct it from the remaining blocks. */
- 2648 if ((s->uptodate == disks - 1) &&
- 2649 (s->failed && (disk_idx == s->failed_num[0] ||
- 2650 disk_idx == s->failed_num[1]))) {
- 2651 /* have disk failed, and we're requested to fetch it;
- 2652 * do compute it
- 2653 */
- 2654 pr_debug("Computing stripe %llu block %d\n",
- 2655 (unsigned long long)sh->sector, disk_idx);
- 2656 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
- 2657 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
- 2658 set_bit(R5_Wantcompute, &dev->flags);
- 2659 sh->ops.target = disk_idx;
- 2660 sh->ops.target2 = -1; /* no 2nd target */
- 2661 s->req_compute = 1;
- 2662 /* Careful: from this point on 'uptodate' is in the eye
- 2663 * of raid_run_ops which services 'compute' operations
- 2664 * before writes. R5_Wantcompute flags a block that will
- 2665 * be R5_UPTODATE by the time it is needed for a
- 2666 * subsequent operation.
- 2667 */
- 2668 s->uptodate++;
- 2669 return 1;
- /* Case 2 (RAID6): two devices missing -- schedule a dual
-  * compute targeting this block and the other missing one. */
- 2670 } else if (s->uptodate == disks-2 && s->failed >= 2) {
- 2671 /* Computing 2-failure is *very* expensive; only
- 2672 * do it if failed >= 2
- 2673 */
- 2674 int other;
- 2675 for (other = disks; other--; ) {
- 2676 if (other == disk_idx)
- 2677 continue;
- 2678 if (!test_bit(R5_UPTODATE,
- 2679 &sh->dev[other].flags))
- 2680 break;
- 2681 }
- 2682 BUG_ON(other < 0);
- 2683 pr_debug("Computing stripe %llu blocks %d,%d\n",
- 2684 (unsigned long long)sh->sector,
- 2685 disk_idx, other);
- 2686 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
- 2687 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
- 2688 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
- 2689 set_bit(R5_Wantcompute, &sh->dev[other].flags);
- 2690 sh->ops.target = disk_idx;
- 2691 sh->ops.target2 = other;
- 2692 s->uptodate += 2;
- 2693 s->req_compute = 1;
- 2694 return 1;
- /* Case 3: the backing disk is in sync -- just read the block. */
- 2695 } else if (test_bit(R5_Insync, &dev->flags)) {
- 2696 set_bit(R5_LOCKED, &dev->flags);
- 2697 set_bit(R5_Wantread, &dev->flags);
- 2698 s->locked++;
- 2699 pr_debug("Reading block %d (sync=%d)\n",
- 2700 disk_idx, s->syncing);
- 2701 }
- 2702 }
- 2703
- 2704 return 0;
- 2705 }
- 3673 ops_run_io(sh, &s);
我们再跟进这个函数,为了突出重点,这里只列出跟同步相关的代码:
- 537 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
- 538 {
- 539 struct r5conf *conf = sh->raid_conf;
- 540 int i, disks = sh->disks;
- 541
- 542 might_sleep();
- 543
- 544 for (i = disks; i--; ) {
- 545 int rw;
- 546 int replace_only = 0;
- 547 struct bio *bi, *rbi;
- 548 struct md_rdev *rdev, *rrdev = NULL;
- ...
- 554 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- 555 rw = READ;
- ...
- 560 } else
- 561 continue;
- 564
- 565 bi = &sh->dev[i].req;
- 566 rbi = &sh->dev[i].rreq; /* For writing to replacement */
- 567
- 568 bi->bi_rw = rw;
- 569 rbi->bi_rw = rw;
- 570 if (rw & WRITE) {
- 573 } else
- 574 bi->bi_end_io = raid5_end_read_request;
- 575
- 576 rcu_read_lock();
- 577 rrdev = rcu_dereference(conf->disks[i].replacement);
- 578 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
- 579 rdev = rcu_dereference(conf->disks[i].rdev);
- 580 if (!rdev) {
- 581 rdev = rrdev;
- 582 rrdev = NULL;
- 583 }
- ...
- 598 if (rdev)
- 599 atomic_inc(&rdev->nr_pending);
- ...
- 604 rcu_read_unlock();
- ...
- 643 if (rdev) {
- 644 if (s->syncing || s->expanding || s->expanded
- 645 || s->replacing)
- 646 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
- 647
- 648 set_bit(STRIPE_IO_STARTED, &sh->state);
- 649
- 650 bi->bi_bdev = rdev->bdev;
- 651 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
- 652 __func__, (unsigned long long)sh->sector,
- 653 bi->bi_rw, i);
- 654 atomic_inc(&sh->count);
- 655 if (use_new_offset(conf, sh))
- 656 bi->bi_sector = (sh->sector
- 657 + rdev->new_data_offset);
- 658 else
- 659 bi->bi_sector = (sh->sector
- 660 + rdev->data_offset);
- 661 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
- 662 bi->bi_rw |= REQ_FLUSH;
- 663
- 664 bi->bi_flags = 1 << BIO_UPTODATE;
- 665 bi->bi_idx = 0;
- 666 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
- 667 bi->bi_io_vec[0].bv_offset = 0;
- 668 bi->bi_size = STRIPE_SIZE;
- 669 bi->bi_next = NULL;
- 670 if (rrdev)
- 671 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
- 672 generic_make_request(bi);
- 673 }
- ...
- 709 }
- 710 }
- 1710 static void raid5_end_read_request(struct bio * bi, int error)
- 1711 {
- ...
- 1824 rdev_dec_pending(rdev, conf->mddev);
- 1825 clear_bit(R5_LOCKED, &sh->dev[i].flags);
- 1826 set_bit(STRIPE_HANDLE, &sh->state);
- 1827 release_stripe(sh);
- 1828 }
- 3528 if (sh->check_state ||
- 3529 (s.syncing && s.locked == 0 &&
- 3530 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
- 3531 !test_bit(STRIPE_INSYNC, &sh->state))) {
- 3532 if (conf->level == 6)
- 3533 handle_parity_checks6(conf, sh, &s, disks);
- 3534 else
- 3535 handle_parity_checks5(conf, sh, &s, disks);
- 3536 }
- 2881 switch (sh->check_state) {
- 2882 case check_state_idle:
- 2883 /* start a new check operation if there are no failures */
- 2884 if (s->failed == 0) {
- 2885 BUG_ON(s->uptodate != disks);
- 2886 sh->check_state = check_state_run;
- 2887 set_bit(STRIPE_OP_CHECK, &s->ops_request);
- 2888 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
- 2889 s->uptodate--;
- 2890 break;
- 2891 }
- 1412 if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
- 1413 if (sh->check_state == check_state_run)
- 1414 ops_run_check_p(sh, percpu);
ops_run_check_p校验条带是否同步,对应的回调函数为:
- /* Completion callback for the async parity check (STRIPE_OP_CHECK):
-  * by now the result is available in sh->ops.zero_sum_result, so move
-  * the check state machine to check_state_check_result and re-queue
-  * the stripe for handling. */
- 1301static void ops_complete_check(void *stripe_head_ref)
- 1302{
- 1303 struct stripe_head *sh = stripe_head_ref;
- 1304
- 1305 pr_debug("%s: stripe %llu\n", __func__,
- 1306 (unsigned long long)sh->sector);
- 1307
- 1308 sh->check_state = check_state_check_result;
- 1309 set_bit(STRIPE_HANDLE, &sh->state);
- 1310 release_stripe(sh);
- 1311}
- 2916 case check_state_check_result:
- 2917 sh->check_state = check_state_idle;
- 2918
- 2919 /* if a failure occurred during the check operation, leave
- 2920 * STRIPE_INSYNC not set and let the stripe be handled again
- 2921 */
- 2922 if (s->failed)
- 2923 break;
- 2924
- 2925 /* handle a successful check operation, if parity is correct
- 2926 * we are done. Otherwise update the mismatch count and repair
- 2927 * parity if !MD_RECOVERY_CHECK
- 2928 */
- 2929 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
- 2930 /* parity is correct (on disc,
- 2931 * not in buffer any more)
- 2932 */
- 2933 set_bit(STRIPE_INSYNC, &sh->state);
- 2934 else {
- 2935 conf->mddev->resync_mismatches += STRIPE_SECTORS;
- 2936 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- 2937 /* don't try to repair!! */
- 2938 set_bit(STRIPE_INSYNC, &sh->state);
- 2939 else {
- 2940 sh->check_state = check_state_compute_run;
- 2941 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
- 2942 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
- 2943 set_bit(R5_Wantcompute,
- 2944 &sh->dev[sh->pd_idx].flags);
- 2945 sh->ops.target = sh->pd_idx;
- 2946 sh->ops.target2 = -1;
- 2947 s->uptodate++;
- 2948 }
- 2949 }
- 2950 break;
- 3550 if ((s.syncing || s.replacing) && s.locked == 0 &&
- 3551 test_bit(STRIPE_INSYNC, &sh->state)) {
- 3552 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
- 3553 clear_bit(STRIPE_SYNCING, &sh->state);
- 3554 }
如果条带未同步,那么就带着STRIPE_OP_COMPUTE_BLK标志来到raid_run_ops函数,该函数调用__raid_run_ops:
- 1383 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
- 1384 if (level < 6)
- 1385 tx = ops_run_compute5(sh, percpu);
最终调用ops_run_compute5函数计算出条带中校验盘的值,该函数对应的回调函数为ops_complete_compute:
- /* Completion callback for the async compute (STRIPE_OP_COMPUTE_BLK):
-  * marks the computed target device(s) up to date, and if the compute
-  * was started by the parity-check path (check_state_compute_run),
-  * advances the check state machine so handle_stripe can finish the
-  * repair. The stripe is then re-queued for handling. */
- 856static void ops_complete_compute(void *stripe_head_ref)
- 857{
- 858 struct stripe_head *sh = stripe_head_ref;
- 859
- 860 pr_debug("%s: stripe %llu\n", __func__,
- 861 (unsigned long long)sh->sector);
- 862
- 863 /* mark the computed target(s) as uptodate */
- 864 mark_target_uptodate(sh, sh->ops.target);
- 865 mark_target_uptodate(sh, sh->ops.target2);
- 866
- 867 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
- 868 if (sh->check_state == check_state_compute_run)
- 869 sh->check_state = check_state_compute_result;
- 870 set_bit(STRIPE_HANDLE, &sh->state);
- 871 release_stripe(sh);
- 872}
- 2894 case check_state_compute_result:
- 2895 sh->check_state = check_state_idle;
- 2896 if (!dev)
- 2897 dev = &sh->dev[sh->pd_idx];
- 2898
- 2899 /* check that a write has not made the stripe insync */
- 2900 if (test_bit(STRIPE_INSYNC, &sh->state))
- 2901 break;
- 2902
- 2903 /* either failed parity check, or recovery is happening */
- 2904 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
- 2905 BUG_ON(s->uptodate != disks);
- 2906
- 2907 set_bit(R5_LOCKED, &dev->flags);
- 2908 s->locked++;
- 2909 set_bit(R5_Wantwrite, &dev->flags);
- 2910
- 2911 clear_bit(STRIPE_DEGRADED, &sh->state);
- 2912 set_bit(STRIPE_INSYNC, &sh->state);
- 2913 break;
linux内核奇遇记之md源代码解读之十raid5数据流之同步数据流程相关推荐
- linux内核奇遇记之md源代码解读之十二raid读写
linux内核奇遇记之md源代码解读之十二raid读写 转载请注明出处:http://blog.csdn.net/liumangxiong 我们都知道,对一个linux块设备来说,都有一个对应的请求队 ...
- linux内核奇遇记之md源代码解读之八阵列同步二
linux内核奇遇记之md源代码解读之八阵列同步二 转载请注明出处:http://blog.csdn.net/liumangxiong 在上一小节里讲到启动同步线程: 7824 mddev->s ...
- 复制linux内核,linux内核写时复制机制源代码解读
作者简介 写时复制技术(一下简称COW)是linux内核比较重要的一种机制,我们都知道:父进程fork子进程的时候,子进程会和父进程会以只读的方式共享所有私有的可写页,当有一方将要写的时候会发生COW ...
- 《Linux内核设计与实现》读书笔记(十九)- 可移植性
linux内核的移植性非常好, 目前的内核也支持非常多的体系结构(有20多个). 但是刚开始时, linux也只支持 intel i386 架构, 从 v1.2版开始支持 Digital Alpha, ...
- 高通linux内核目录,高通 android 源代码以及目标系统目录结构
下面为高通android源代码结构 build/ – Build 环境建立和makefiles生成4 bionic/ – Android C 库 dalvik/ – Android Java 虚拟机 ...
- linux内核页高速缓存,《Linux内核设计与实现》读书笔记(十六)- 页高速缓存和页回写(示例代码)...
主要内容: 缓存简介 页高速缓存 页回写 1. 缓存简介 在编程中,缓存是很常见也很有效的一种提高程序性能的机制. linux内核也不例外,为了提高I/O性能,也引入了缓存机制,即将一部分磁盘上的数据 ...
- 《Linux内核设计与实现》读书笔记(十)- 内核同步方法【转】
转自:http://www.cnblogs.com/wang_yb/archive/2013/05/01/3052865.html 内核中提供了多种方法来防止竞争条件,理解了这些方法的使用场景有助于我 ...
- 《Linux内核设计与实现》读书笔记(十四)- 块I/O层
最近太忙,居然过了2个月才更新第十四章.... 主要内容: 块设备简介 内核访问块设备的方法 内核I/O调度程序 1. 块设备简介 I/O设备主要有2类: 字符设备:只能顺序读写设备中的内容,比如 串 ...
- 《Linux内核设计与实现》读书笔记(十八)- 内核调试
内核调试的难点在于它不能像用户态程序调试那样打断点,随时暂停查看各个变量的状态. 也不能像用户态程序那样崩溃后迅速的重启,恢复初始状态. 用户态程序和内核交互,用户态程序的各种状态,错误等可以由内核来 ...
- 《Linux内核设计与实现》读书笔记(十六)- 页高速缓存和页回写
主要内容: 缓存简介 页高速缓存 页回写 1. 缓存简介 在编程中,缓存是很常见也很有效的一种提高程序性能的机制. linux内核也不例外,为了提高I/O性能,也引入了缓存机制,即将一部分磁盘上的数据 ...
最新文章
- 使用C++ stringstream来进行数据类型转换
- python函数可选参数传递_Python中函数的参数传递
- 实战SSM_O2O商铺_23【商铺列表】Controller层开发
- calibre中的hcell_关于calibre的Hcell你知道多少?
- Windows API一日一练(2)使用应用程序句柄
- Javascript 动态修改select方法大全【转】
- 我终于知道post和get的区别
- 5月份Github上最热门的数据科学和机器学习项目
- 四大招让无处不在的工作空间成为可能?揭秘Ivanti 的战略布局
- vue-router路由安装与使用
- 的正确使用_如何正确使用隔离霜
- 简单的网页编辑器js代码
- 微信网页授获取code
- 企业要如何利用360评估法做好人才盘点?
- 区分计算机网络和互联网的概念,网络的概念,网络与互联网的区别
- Candence学习篇(5)使用Padstack Editor制作贴片焊盘和通孔焊盘
- 大数据面试常见问题(三)——Hadoop部分
- 区块链再度走入沉寂期,下一个撬动行业的支点会在哪里?
- Android Studio Dolphin 稳定版正式发布
- mac下如何配搭建配置自己的svn