Get netdevice stats

##call trace

The '.show' method of the device_attribute calls netstat_show. Some drivers
read part of the stats from NIC registers, but most maintain the stats in
software, in dev->stats:

netstat_show
> dev_get_stats
> >   ops = dev->netdev_ops;
> > > ops->ndo_get_stats64
> > > > ixgbe_get_stats64
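For context, a sketch of dev_get_stats() as it looks in this era (paraphrased from memory of net/core/dev.c around 3.0; check your tree for the exact body). It shows the fallback chain: the driver's ndo_get_stats64 if present, else ndo_get_stats, else the software counters in dev->stats:

struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);	/* e.g. ixgbe_get_stats64 */
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);	/* pure software counters */
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}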


git study summary

##create repo

1087 git init --bare study.git <=== create study.git
1088 ls study.git/
1089 cat study.git/config
1090 ls
1091 git clone study.git/
1092 ls
1093 cd study
1094 ls
1095 touch study.readme
1096 echo "This is readme for study(master)"
1097 echo "This is readme for study(master)" >>study.readme
1098 git add study.readme
1099 git commit -s study.readme <===== commit a file for test.
1100 git push origin master <===== push them to the server (locally).
1101 cd ..
1102 ls

##create a remote branch.

1103 git clone study.git/ tmp1 
1104 cd tmp1/
1105 ls
1106 git log
1107 cd ..
1108 ls
1109 cd study
1110 ls
1111 git checkout -b dev_junwei <======== create a local branch.
1112 ls
1113 vim study.readme
1114 ls
1115 git commit study.readme
1116 git mv study.readme study.dev.junwei.readme
1117 git push origin dev_junwei <======== create a remote branch with the same name as the local one.
1118 git branch

##create a mirror for the study.git

git clone --bare study.git/ mirror.git

##Add a new remote git repo.

1134 git remote add mirror /home/junwei/git_study/mirror.git/ <=== add a new remote named 'mirror'.
1136 git remote show
1137 git remote show origin
1138 git remote show mirror
1139 git status
1140 git fetch mirror <===== important! only after this fetch can the following checkouts succeed

1141 git checkout -b m_master mirror/master <=== create a local branch from the remote branch.
1142 git checkout -b m_dev mirror/dev_junwei

1164 git commit -sa <=== commit a change to the local branch.

1168 git push mirror m_dev:dev_junwei <=== push the local change to the remote branch ("m_dev" was created at 1142).

1174 git push mirror m_dev:dev_junwei <=== make more changes/commits and push again to the remote mirror.
(watch out: the refspec is m_dev:dev_junwei with a colon, NOT m_dev/dev_junwei!)

1176 git status

git config push.default tracking makes git push default to pushing the current branch to its corresponding remote tracking branch:

[junwei@junwei study]$ git config push.default tracking
[junwei@junwei study]$ cat .git/config
....
[push]
default = tracking
[junwei@junwei study]$

git push origin :remote_branch_name <=== pushing an empty ref deletes the remote branch
[martin@fc16 example]$ git log --oneline  
7f9df38 This file should only be seen on branch 'dev'.
5679bd2 fill a line in readme file on branch 'dev'.
3d83de1 Add an empty readme
[martin@fc16 example]$
[martin@fc16 example]$ git tag init 3d83de1
[martin@fc16 example]$ git tag v1.0
[martin@fc16 example]$
[martin@fc16 example]$ git tag
init
v1.0
[martin@fc16 example]$ git push
Everything up-to-date
[martin@fc16 example]$ git push --tags
Total 0 (delta 0), reused 0 (delta 0)
To /var/lib/git/test/example.git/
* [new tag] init -> init
* [new tag] v1.0 -> v1.0
[martin@fc16 example]$

##delete remote tags.

[martin@fc16 example]$ git push  origin  :v1.a0
To /var/lib/git/test/example.git/
- [deleted] v1.a0
[martin@fc16 example]$

softirq

Why softirqs are needed

Softirqs exist so that interrupt handlers can return as quickly as possible: non-urgent work is deferred to the bottom half. The softirq is one implementation of the bottom half; the others are the tasklet and the workqueue.

Softirq characteristics

The same softirq can run concurrently on multiple CPUs, whereas a given tasklet runs on only one CPU at a time in the whole system. Neither softirqs nor tasklets are allowed to sleep, so anything that might sleep must be avoided in them; a workqueue runs in process context and is allowed to sleep.
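As a minimal sketch of the contrast (kernel 3.0-era API; the my_* names are made up for illustration):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* tasklet: runs in softirq context, must NOT sleep */
static void my_tasklet_fn(unsigned long data)
{
	/* no mutex_lock()/GFP_KERNEL allocations here - atomic context */
}
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

/* work item: runs in process context, may sleep */
static void my_work_fn(struct work_struct *work)
{
	/* sleeping is fine here, e.g. mutex_lock() */
}
static DECLARE_WORK(my_work, my_work_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* top half: do only the urgent part, then defer the rest */
	tasklet_schedule(&my_tasklet);	/* bottom half, atomic */
	schedule_work(&my_work);	/* bottom half, can sleep */
	return IRQ_HANDLED;
}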


the mem_init on mips (octeon)

arch/mips/kernel/setup.c

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Init any data related to initrd. It's a nop if INITRD is
	 * not selected. Once that done we can determine the low bound
	 * of usable memory.
	 */
	reserved_end = max(init_initrd(),
			   (unsigned long) PFN_UP(__pa_symbol(&_end)));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		add_active_range(0, start, end);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		/*
		 * Reserve usable memory.
		 */
		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);
		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}



struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};



mm/page_alloc.c
===================
static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];



/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	int i;

	mminit_dprintk(MMINIT_TRACE, "memory_register",
		       "Entering add_active_range(%d, %#lx, %#lx) "
		       "%d entries of %d used\n",
		       nid, start_pfn, end_pfn,
		       nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
		    end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
		    end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].start_pfn &&
		    end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
		       MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}





arch/mips/mm/init.c
====================================


void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif


arch/mips/kernel/setup.c
================================
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	prom_init(); <=====

#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p); <=====

	resource_init();
	plat_smp_setup();
}

neighbour study notes (kernel 3.0)

  1. For ethernet, dev->header_ops is eth_header_ops
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
...
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));

include/linux/etherdevice.h

#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)

net/ethernet/eth.c

struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs)
{
	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
}

net/core/dev.c

struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs)
...
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev); <===

	dev->num_tx_queues = txqs;

net/ethernet/eth.c

void ether_setup(struct net_device *dev)
{
	dev->header_ops		= &eth_header_ops; <===
	dev->type		= ARPHRD_ETHER;
	dev->hard_header_len	= ETH_HLEN;
	dev->mtu		= ETH_DATA_LEN;
	dev->addr_len		= ETH_ALEN;
	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
	dev->priv_flags		= IFF_TX_SKB_SHARING;

	memset(dev->broadcast, 0xFF, ETH_ALEN);
}
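For reference, eth_header_ops itself (the struct the arrow points at) is defined in the same file, net/ethernet/eth.c; this is quoted from memory of the 3.0-era source, so verify against your tree:

const struct header_ops eth_header_ops ____cacheline_aligned = {
	.create		= eth_header,
	.parse		= eth_header_parse,
	.rebuild	= eth_header_rebuild,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};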

register_pernet_subsys notes

pernet ops

/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
*
* Register a subsystem which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered all network namespace init functions are
* called for every existing network namespace. Allowing kernel
* modules to have a race free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order with which they were
* registered.
*/
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}

===> static int register_pernet_operations(struct list_head *list,
					  struct pernet_operations *ops)
{
	error = __register_pernet_operations(list, ops);
}

======> #ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
=============>		error = ops_init(ops, net);
			if (error)
				goto out_undo;
=============>		list_add_tail(&net->exit_list, &net_exit_list); <<< confused?! isn't net_exit_list a local variable?
		}
	}
	return 0;


=============> static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;
	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net); <====== the ops->init will be called.
	return 0;
}

####For example
inet6_init registers inet6_net_ops with the pernet machinery.

static struct pernet_operations inet6_net_ops = {
	.init = inet6_net_init,
	.exit = inet6_net_exit,
};

static int __init inet6_init(void)
{
	.....
	err = register_pernet_subsys(&inet6_net_ops);
	if (err)
		goto register_pernet_fail;
	.....
}
So the call ops->init(net) <====== resolves to inet6_net_init(net),
because .init = inet6_net_init.
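Putting it together, here is a minimal sketch (the my_net* names are invented for illustration; 3.0-era API) of a module that keeps per-namespace private data via .id/.size, matching the kzalloc/net_assign_generic path in ops_init() above:

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct my_net {
	int counter;			/* one instance per network namespace */
};

static int my_net_id;			/* slot filled in by the pernet machinery */

static int __net_init my_net_init(struct net *net)
{
	/* ops_init() has already kzalloc'ed and assigned our .size bytes */
	struct my_net *mn = net_generic(net, my_net_id);

	mn->counter = 0;		/* called once for every netns, existing and new */
	return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
	/* per-netns cleanup; the .size buffer itself is freed by the core */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
	.id   = &my_net_id,
	.size = sizeof(struct my_net),
};

static int __init my_module_init(void)
{
	return register_pernet_subsys(&my_net_ops);
}

static void __exit my_module_exit(void)
{
	unregister_pernet_subsys(&my_net_ops);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");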