1087 git init --bare study.git <=== create study.git 1088 ls study.git/ 1089 cat study.git/config 1090 ls 1091 git clone study.git/ 1092 ls 1093 cd study 1094 ls 1095 touch study.readme 1096 echo "This is readme for study(master)" 1097 echo "This is readme for study(master)" >>study.readme 1098 git add study.readme 1099 git commit -s study.readme <===== commit a file for test. 1100 git push origin master <===== push them to the server (locally). 1101 cd .. 1102 ls
## Create a remote branch.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
1103 git clone study.git/ tmp1 1104 cd tmp1/ 1105 ls 1106 git log 1107 cd .. 1108 ls 1109 cd study 1110 ls 1111 git checkout -b dev_junwei <======== create a local branch. 1112 ls 1113 vim study.readme 1114 ls 1115 git commit study.readme 1116 git mv study.readme study.dev.junwei.readme 1117 git push origin dev_junwei <======== create a remote branch; the remote branch has the same name as the local one. 1118 git branch
## Create a mirror for study.git
1
git clone --bare study.git/ mirror.git
## Add a new remote git repo.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
1134 git remote add mirror /home/junwei/git_study/mirror.git/ <=== add a new remote and name it "mirror". 1136 git remote show 1137 git remote show origin 1138 git remote show mirror 1139 git status 1140 git fetch mirror <===== !!!! important: only after this fetch can the following checkouts succeed
1141 git checkout -b m_master mirror/master <=== create a local branch from the remote branch. 1142 git checkout -b m_dev mirror/dev_junwei
1164 git commit -sa <=== commit a change to local branch.
1168 git push mirror m_dev:dev_junwei <=== push the local change to remote branch. "m_dev" is created in 1142.
1174 git push mirror m_dev:dev_junwei <=== do some change/commit and push again to remote mirror. FU CK( NOT m_dev/dev_junwei)!!!!!!!
staticvoid __init bootmem_init(void) { unsignedlong reserved_end; unsignedlong mapstart = ~0UL; unsignedlong bootmap_size; int i; /* * Init any data related to initrd. It's a nop if INITRD is * not selected. Once that done we can determine the low bound * of usable memory. */ reserved_end = max(init_initrd(), (unsignedlong) PFN_UP(__pa_symbol(&_end))); /* * max_low_pfn is not a number of pages. The number of pages * of the system is given by 'max_low_pfn - min_low_pfn'. */ min_low_pfn = ~0UL; max_low_pfn = 0; /* * Find the highest page frame number we have available. */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsignedlong start, end; if (boot_mem_map.map[i].type != BOOT_MEM_RAM) continue; start = PFN_UP(boot_mem_map.map[i].addr); end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); if (end > max_low_pfn) max_low_pfn = end; if (start < min_low_pfn) min_low_pfn = start; if (end <= reserved_end) continue; if (start >= mapstart) continue; mapstart = max(reserved_end, start); } if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); if (min_low_pfn > ARCH_PFN_OFFSET) { pr_info("Wasting %lu bytes for tracking %lu unused pages\n", (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page), min_low_pfn - ARCH_PFN_OFFSET); } elseif (min_low_pfn < ARCH_PFN_OFFSET) { pr_info("%lu free pages won't be used\n", ARCH_PFN_OFFSET - min_low_pfn); } min_low_pfn = ARCH_PFN_OFFSET; /* * Determine low and high memory ranges */ max_pfn = max_low_pfn; if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { #ifdef CONFIG_HIGHMEM highstart_pfn = PFN_DOWN(HIGHMEM_START); highend_pfn = max_low_pfn; #endif max_low_pfn = PFN_DOWN(HIGHMEM_START); } /* * Initialize the boot-time allocator with low memory only. 
*/ bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart, min_low_pfn, max_low_pfn); for (i = 0; i < boot_mem_map.nr_map; i++) { unsignedlong start, end; start = PFN_UP(boot_mem_map.map[i].addr); end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); if (start <= min_low_pfn) start = min_low_pfn; if (start >= end) continue; #ifndef CONFIG_HIGHMEM if (end > max_low_pfn) end = max_low_pfn; /* * ... finally, is the area going away? */ if (end <= start) continue; #endif add_active_range(0, start, end); } /* * Register fully available low RAM pages with the bootmem allocator. */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsignedlong start, end, size; /* * Reserve usable memory. */ if (boot_mem_map.map[i].type != BOOT_MEM_RAM) continue; start = PFN_UP(boot_mem_map.map[i].addr); end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); /* * We are rounding up the start address of usable memory * and at the end of the usable range downwards. */ if (start >= max_low_pfn) continue; if (start < reserved_end) start = reserved_end; if (end > max_low_pfn) end = max_low_pfn; /* * ... finally, is the area going away? */ if (end <= start) continue; size = end - start; /* Register lowmem ranges */ free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT); memory_present(0, start, end); } /* * Reserve the bootmap memory. */ reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT); /* * Reserve initrd memory if needed. 
*/ finalize_initrd(); } structnode_active_region { unsignedlong start_pfn; unsignedlong end_pfn; int nid; }; mm/page_alloc.c =================== staticstruct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; /** * add_active_range - Register a range of PFNs backed by physical memory * @nid: The node ID the range resides on * @start_pfn: The start PFN of the available physical memory * @end_pfn: The end PFN of the available physical memory * * These ranges are stored in an early_node_map[] and later used by * free_area_init_nodes() to calculate zone sizes and holes. If the * range spans a memory hole, it is up to the architecture to ensure * the memory is not freed by the bootmem allocator. If possible * the range being registered will be merged with existing ranges. */ void __init add_active_range(unsignedint nid, unsignedlong start_pfn, unsignedlong end_pfn) { int i; mminit_dprintk(MMINIT_TRACE, "memory_register", "Entering add_active_range(%d, %#lx, %#lx) " "%d entries of %d used\n", nid, start_pfn, end_pfn, nr_nodemap_entries, MAX_ACTIVE_REGIONS); mminit_validate_memmodel_limits(&start_pfn, &end_pfn); /* Merge with existing active regions if possible */ for (i = 0; i < nr_nodemap_entries; i++) { if (early_node_map[i].nid != nid) continue; /* Skip if an existing region covers this new one */ if (start_pfn >= early_node_map[i].start_pfn && end_pfn <= early_node_map[i].end_pfn) return; /* Merge forward if suitable */ if (start_pfn <= early_node_map[i].end_pfn && end_pfn > early_node_map[i].end_pfn) { early_node_map[i].end_pfn = end_pfn; return; } /* Merge backward if suitable */ if (start_pfn < early_node_map[i].start_pfn && end_pfn >= early_node_map[i].start_pfn) { early_node_map[i].start_pfn = start_pfn; return; } } /* Check that early_node_map is large enough */ if (i >= MAX_ACTIVE_REGIONS) { printk(KERN_CRIT "More than %d memory regions, truncating\n", MAX_ACTIVE_REGIONS); return; } early_node_map[i].nid = nid; early_node_map[i].start_pfn = 
start_pfn; early_node_map[i].end_pfn = end_pfn; nr_nodemap_entries = i + 1; } arch/mips/mm/init.c ==================================== void __init paging_init(void) { unsignedlong max_zone_pfns[MAX_NR_ZONES]; unsignedlong lastpfn __maybe_unused; pagetable_init(); #ifdef CONFIG_HIGHMEM kmap_init(); #endif kmap_coherent_init(); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; #endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; lastpfn = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; lastpfn = highend_pfn; if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) { printk(KERN_WARNING "This processor doesn't support highmem." " %ldk highmem ignored\n", (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; lastpfn = max_low_pfn; } #endif free_area_init_nodes(max_zone_pfns); } #ifdef CONFIG_64BIT staticstructkcore_listkcore_kseg0; #endif arch/mips/kernel.c ================================ void __init setup_arch(char **cmdline_p) { cpu_probe(); prom_init();<===== #ifdef CONFIG_EARLY_PRINTK setup_early_printk(); #endif cpu_report(); check_bugs_early(); #if defined(CONFIG_VT) #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif #endif arch_mem_init(cmdline_p);<===== resource_init(); plat_smp_setup(); }
/** * register_pernet_subsys - register a network namespace subsystem * @ops: pernet operations structure for the subsystem * * Register a subsystem which has init and exit functions * that are called when network namespaces are created and * destroyed respectively. * * When registered all network namespace init functions are * called for every existing network namespace. Allowing kernel * modules to have a race free view of the set of network namespaces. * * When a new network namespace is created all of the init * methods are called in the order in which they were registered. * * When a network namespace is destroyed all of the exit methods * are called in the reverse of the order with which they were * registered. */
When we compile glibc (or eglibc), we need to generate the timezone data files along with it, although they are stable and change rarely across version updates.
Today we ran into a problem with this.
We use the old glibc's timezone files, which are used by many different toolchains for several platforms.
Unfortunately, the data file has been changed after 2007 by GNU upstream, but I could not find the exact glibc version (date) in which the timezone data file changed.
### af_key.c — the Linux kernel provides 3 methods to manage SA/SP, such as add/del/flush/dump of SAs/SPs:
pf_key socket.
netlink message.
socket option.
The af_key.c implement the pf_key socket.
### Part 1. pf_key socket definition of socket operations. The important functions are pfkey_create, pfkey_sendmsg, pfkey_recvmsg, pfkey_release, and datagram_poll.
In kernel 3.0, the pf_key message format is a traditional TLV format:
header + (extenion-header + extention_value)*n
The header is sadb_msg. The extension header is sadb_ext. The extension value differs according to the extension header, such as sadb_sa, sadb_x_policy, and so on.
An application program (such as setkey) sends a command to the kernel via the sendmsg system API. In the kernel, pf_key then calls pfkey_sendmsg. pfkey_sendmsg calls pfkey_get_base_msg to do some simple checks, and then calls pfkey_process.
pfkey_process first calls pfkey_broadcast, then divides the extension messages into a pointer array one by one: void *ext_hdrs\[SADB_EXT_MAX\]; SADB_EXT_SA ---> SADB_EXT_ADDRESS_SRC ---> SADB_EXT_ADDRESS_DST ---> this pointer array will be used by the following handlers.
It then calls the pfkey_handler according to the sadb_msg_type in the pf_key message header.