// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
 * sysctl_net_core.c: sysctl interface to net core subsystem.
 *
 * Begun April 1, 1996, Mike Shaver.
 * Added /proc/sys/net/core directory entry (empty =) ). [MS]
 */

#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>

static int two = 2;
static int three = 3;
static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static long long_one __maybe_unused = 1;
static long long_max __maybe_unused = LONG_MAX;

static int net_msg_warn;        /* Unused, but still a sysctl */

int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);

/* 0 - Keep current behavior:
 *     IPv4: inherit all current settings from init_net
 *     IPv6: reset all settings to default
 * 1 - Both inherit all current settings from init_net
 * 2 - Both reset all settings to default
 * 3 - Both inherit all settings from current netns
 */
int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
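
/* Example (illustrative): the modes above are selected from userspace via
 * the "devconf_inherit_init_net" entry registered in net_core_table below,
 * e.g.
 *
 *     sysctl -w net.core.devconf_inherit_init_net=1
 *
 * which makes devices in newly created network namespaces inherit the
 * current init_net settings for both IPv4 and IPv6.
 */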

#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int orig_size, size;
        int ret, i;
        struct ctl_table tmp = {
                .data = &size,
                .maxlen = sizeof(size),
                .mode = table->mode
        };
        struct rps_sock_flow_table *orig_sock_table, *sock_table;
        static DEFINE_MUTEX(sock_flow_mutex);

        mutex_lock(&sock_flow_mutex);

        orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
                                        lockdep_is_held(&sock_flow_mutex));
        size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

        ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

        if (write) {
                if (size) {
                        if (size > 1<<29) {
                                /* Enforce limit to prevent overflow */
                                mutex_unlock(&sock_flow_mutex);
                                return -EINVAL;
                        }
                        size = roundup_pow_of_two(size);
                        if (size != orig_size) {
                                sock_table =
                                    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
                                if (!sock_table) {
                                        mutex_unlock(&sock_flow_mutex);
                                        return -ENOMEM;
                                }
                                rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
                                sock_table->mask = size - 1;
                        } else
                                sock_table = orig_sock_table;

                        for (i = 0; i < size; i++)
                                sock_table->ents[i] = RPS_NO_CPU;
                } else
                        sock_table = NULL;

                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
                        if (sock_table) {
                                static_branch_inc(&rps_needed);
                                static_branch_inc(&rfs_needed);
                        }
                        if (orig_sock_table) {
                                static_branch_dec(&rps_needed);
                                static_branch_dec(&rfs_needed);
                                kvfree_rcu(orig_sock_table);
                        }
                }
        }

        mutex_unlock(&sock_flow_mutex);

        return ret;
}
#endif /* CONFIG_RPS */
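
/* Example (illustrative): enabling RFS from userspace means writing the
 * desired number of global flow entries, which the handler above rounds up
 * to a power of two and caps at 1<<29, e.g.
 *
 *     echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *
 * Writing 0 frees the table and decrements the rps/rfs static branches
 * again.
 */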

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
                                 void *buffer, size_t *lenp, loff_t *ppos)
{
        struct sd_flow_limit *cur;
        struct softnet_data *sd;
        cpumask_var_t mask;
        int i, len, ret = 0;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        if (write) {
                ret = cpumask_parse(buffer, mask);
                if (ret)
                        goto done;

                mutex_lock(&flow_limit_update_mutex);
                len = sizeof(*cur) + netdev_flow_limit_table_len;
                for_each_possible_cpu(i) {
                        sd = &per_cpu(softnet_data, i);
                        cur = rcu_dereference_protected(sd->flow_limit,
                                     lockdep_is_held(&flow_limit_update_mutex));
                        if (cur && !cpumask_test_cpu(i, mask)) {
                                RCU_INIT_POINTER(sd->flow_limit, NULL);
                                kfree_rcu(cur);
                        } else if (!cur && cpumask_test_cpu(i, mask)) {
                                cur = kzalloc_node(len, GFP_KERNEL,
                                                   cpu_to_node(i));
                                if (!cur) {
                                        /* not unwinding previous changes */
                                        ret = -ENOMEM;
                                        goto write_unlock;
                                }
                                cur->num_buckets = netdev_flow_limit_table_len;
                                rcu_assign_pointer(sd->flow_limit, cur);
                        }
                }
write_unlock:
                mutex_unlock(&flow_limit_update_mutex);
        } else {
                char kbuf[128];

                if (*ppos || !*lenp) {
                        *lenp = 0;
                        goto done;
                }

                cpumask_clear(mask);
                rcu_read_lock();
                for_each_possible_cpu(i) {
                        sd = &per_cpu(softnet_data, i);
                        if (rcu_dereference(sd->flow_limit))
                                cpumask_set_cpu(i, mask);
                }
                rcu_read_unlock();

                len = min(sizeof(kbuf) - 1, *lenp);
                len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
                if (!len) {
                        *lenp = 0;
                        goto done;
                }
                if (len < *lenp)
                        kbuf[len++] = '\n';
                memcpy(buffer, kbuf, len);
                *lenp = len;
                *ppos += len;
        }

done:
        free_cpumask_var(mask);
        return ret;
}

static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
                                       void *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int old, *ptr;
        int ret;

        mutex_lock(&flow_limit_update_mutex);

        ptr = table->data;
        old = *ptr;
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!ret && write && !is_power_of_2(*ptr)) {
                *ptr = old;
                ret = -EINVAL;
        }

        mutex_unlock(&flow_limit_update_mutex);
        return ret;
}
#endif /* CONFIG_NET_FLOW_LIMIT */
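
/* Example (illustrative): flow limiting is enabled per CPU by writing a hex
 * cpumask, and the per-CPU table length is rejected unless it is a power of
 * two, e.g.
 *
 *     echo f > /proc/sys/net/core/flow_limit_cpu_bitmap
 *     echo 8192 > /proc/sys/net/core/flow_limit_table_len
 */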

#ifdef CONFIG_NET_SCHED
static int set_default_qdisc(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos)
{
        char id[IFNAMSIZ];
        struct ctl_table tbl = {
                .data = id,
                .maxlen = IFNAMSIZ,
        };
        int ret;

        qdisc_get_default(id, IFNAMSIZ);

        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
        if (write && ret == 0)
                ret = qdisc_set_default(id);
        return ret;
}
#endif
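
/* Example (illustrative): the default qdisc attached to newly created
 * interfaces can be switched at runtime, e.g.
 *
 *     sysctl -w net.core.default_qdisc=fq_codel
 *
 * qdisc_set_default() fails for names that cannot be resolved to a
 * registered qdisc.
 */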

static int proc_do_dev_weight(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret != 0)
                return ret;

        dev_rx_weight = weight_p * dev_weight_rx_bias;
        dev_tx_weight = weight_p * dev_weight_tx_bias;

        return ret;
}
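
/* Worked example (assuming the usual defaults of dev_weight = 64 and both
 * biases = 1): dev_rx_weight and dev_tx_weight both start at 64; writing
 * net.core.dev_weight_rx_bias = 2 would recompute the RX budget to 128
 * while leaving the TX budget at 64.
 */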

static int proc_do_rss_key(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table fake_table;
        char buf[NETDEV_RSS_KEY_LEN * 3];

        snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
        fake_table.data = buf;
        fake_table.maxlen = sizeof(buf);
        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
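
/* Example (illustrative): "%*phC" formats the key as colon-separated hex
 * bytes, so a read returns NETDEV_RSS_KEY_LEN such bytes, e.g.
 *
 *     cat /proc/sys/net/core/netdev_rss_key
 *     84:50:f4:...:d1        (values are random, generated at boot)
 */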

#ifdef CONFIG_BPF_JIT
static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
                                           void *buffer, size_t *lenp,
                                           loff_t *ppos)
{
        int ret, jit_enable = *(int *)table->data;
        struct ctl_table tmp = *table;

        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        tmp.data = &jit_enable;
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
                if (jit_enable < 2 ||
                    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
                        *(int *)table->data = jit_enable;
                        if (jit_enable == 2)
                                pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
                } else {
                        ret = -EPERM;
                }
        }
        return ret;
}
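
/* Example (illustrative): the accepted values map to JIT modes
 * (0 = interpreter only, 1 = JIT enabled, 2 = JIT enabled with the compiled
 * image dumped to the kernel log for debugging), so enabling the JIT is
 *
 *     sysctl -w net.core.bpf_jit_enable=1
 *
 * With CONFIG_BPF_JIT_ALWAYS_ON the table entry below pins the range to 1.
 */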

# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
                                    void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif /* CONFIG_HAVE_EBPF_JIT */

static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
                                     void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static struct ctl_table net_core_table[] = {
        {
                .procname       = "wmem_max",
                .data           = &sysctl_wmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_max",
                .data           = &sysctl_rmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "wmem_default",
                .data           = &sysctl_wmem_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_default",
                .data           = &sysctl_rmem_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "dev_weight",
                .data           = &weight_p,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "dev_weight_rx_bias",
                .data           = &dev_weight_rx_bias,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "dev_weight_tx_bias",
                .data           = &dev_weight_tx_bias,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "netdev_max_backlog",
                .data           = &netdev_max_backlog,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "netdev_rss_key",
                .data           = &netdev_rss_key,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_do_rss_key,
        },
#ifdef CONFIG_BPF_JIT
        {
                .procname       = "bpf_jit_enable",
                .data           = &bpf_jit_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax_bpf_enable,
# ifdef CONFIG_BPF_JIT_ALWAYS_ON
                .extra1         = SYSCTL_ONE,
                .extra2         = SYSCTL_ONE,
# else
                .extra1         = SYSCTL_ZERO,
                .extra2         = &two,
# endif
        },
# ifdef CONFIG_HAVE_EBPF_JIT
        {
                .procname       = "bpf_jit_harden",
                .data           = &bpf_jit_harden,
                .maxlen         = sizeof(int),
                .mode           = 0600,
                .proc_handler   = proc_dointvec_minmax_bpf_restricted,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &two,
        },
        {
                .procname       = "bpf_jit_kallsyms",
                .data           = &bpf_jit_kallsyms,
                .maxlen         = sizeof(int),
                .mode           = 0600,
                .proc_handler   = proc_dointvec_minmax_bpf_restricted,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
# endif
        {
                .procname       = "bpf_jit_limit",
                .data           = &bpf_jit_limit,
                .maxlen         = sizeof(long),
                .mode           = 0600,
                .proc_handler   = proc_dolongvec_minmax_bpf_restricted,
                .extra1         = &long_one,
                .extra2         = &bpf_jit_limit_max,
        },
#endif
        {
                .procname       = "netdev_tstamp_prequeue",
                .data           = &netdev_tstamp_prequeue,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "message_cost",
                .data           = &net_ratelimit_state.interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "message_burst",
                .data           = &net_ratelimit_state.burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "optmem_max",
                .data           = &sysctl_optmem_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "tstamp_allow_data",
                .data           = &sysctl_tstamp_allow_data,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE
        },
#ifdef CONFIG_RPS
        {
                .procname       = "rps_sock_flow_entries",
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rps_sock_flow_sysctl
        },
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
        {
                .procname       = "flow_limit_cpu_bitmap",
                .mode           = 0644,
                .proc_handler   = flow_limit_cpu_sysctl
        },
        {
                .procname       = "flow_limit_table_len",
                .data           = &netdev_flow_limit_table_len,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = flow_limit_table_len_sysctl
        },
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_RX_BUSY_POLL
        {
                .procname       = "busy_poll",
                .data           = &sysctl_net_busy_poll,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
        {
                .procname       = "busy_read",
                .data           = &sysctl_net_busy_read,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
#endif
#ifdef CONFIG_NET_SCHED
        {
                .procname       = "default_qdisc",
                .mode           = 0644,
                .maxlen         = IFNAMSIZ,
                .proc_handler   = set_default_qdisc
        },
#endif
        {
                .procname       = "netdev_budget",
                .data           = &netdev_budget,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "warnings",
                .data           = &net_msg_warn,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "max_skb_frags",
                .data           = &sysctl_max_skb_frags,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
                .extra2         = &max_skb_frags,
        },
        {
                .procname       = "netdev_budget_usecs",
                .data           = &netdev_budget_usecs,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
        },
        {
                .procname       = "fb_tunnels_only_for_init_net",
                .data           = &sysctl_fb_tunnels_only_for_init_net,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &two,
        },
        {
                .procname       = "devconf_inherit_init_net",
                .data           = &sysctl_devconf_inherit_init_net,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &three,
        },
        {
                .procname       = "high_order_alloc_disable",
                .data           = &net_high_order_alloc_disable_key.key,
                .maxlen         = sizeof(net_high_order_alloc_disable_key),
                .mode           = 0644,
                .proc_handler   = proc_do_static_key,
        },
        {
                .procname       = "gro_normal_batch",
                .data           = &gro_normal_batch,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
        },
        {
                .procname       = "netdev_unregister_timeout_secs",
                .data           = &netdev_unregister_timeout_secs,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
                .extra2         = &int_3600,
        },
        { }
};
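
/* Example (illustrative): every entry above becomes a file under the
 * "net/core" path registered in sysctl_core_init() below, so "wmem_max" is
 * reachable both as /proc/sys/net/core/wmem_max and as the sysctl key
 * net.core.wmem_max.
 */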

static struct ctl_table netns_core_table[] = {
        {
                .procname       = "somaxconn",
                .data           = &init_net.core.sysctl_somaxconn,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .extra1         = SYSCTL_ZERO,
                .proc_handler   = proc_dointvec_minmax
        },
        {
                .procname       = "txrehash",
                .data           = &init_net.core.sysctl_txrehash,
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
                .proc_handler   = proc_dou8vec_minmax,
        },
        { }
};

static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
{
        /* fallback tunnels for initns only */
        if (!strncmp(str, "initns", 6))
                sysctl_fb_tunnels_only_for_init_net = 1;
        /* no fallback tunnels anywhere */
        else if (!strncmp(str, "none", 4))
                sysctl_fb_tunnels_only_for_init_net = 2;

        return 1;
}
__setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);
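
/* Example (illustrative): the same policy can be chosen at boot on the
 * kernel command line, e.g. "fb_tunnels=initns" (fallback tunnels only in
 * init_net) or "fb_tunnels=none"; the default of 0 keeps fallback tunnels
 * in every namespace.
 */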

static __net_init int sysctl_core_net_init(struct net *net)
{
        struct ctl_table *tbl, *tmp;

        tbl = netns_core_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;

                for (tmp = tbl; tmp->procname; tmp++)
                        tmp->data += (char *)net - (char *)&init_net;

                /* Don't export any sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns) {
                        tbl[0].procname = NULL;
                }
        }

        net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
        if (net->core.sysctl_hdr == NULL)
                goto err_reg;

        return 0;

err_reg:
        if (tbl != netns_core_table)
                kfree(tbl);
err_dup:
        return -ENOMEM;
}
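
/* Example (illustrative): the pointer adjustment above relies on every
 * .data field in netns_core_table pointing into init_net; adding the byte
 * offset between the new netns and init_net rebases it onto the same field
 * of the new struct net, roughly:
 *
 *     offset = (char *)&init_net.core.sysctl_somaxconn - (char *)&init_net;
 *     tmp->data = (char *)net + offset;   // == &net->core.sysctl_somaxconn
 */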

static __net_exit void sysctl_core_net_exit(struct net *net)
{
        struct ctl_table *tbl;

        tbl = net->core.sysctl_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->core.sysctl_hdr);
        BUG_ON(tbl == netns_core_table);
        kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_core_ops = {
        .init = sysctl_core_net_init,
        .exit = sysctl_core_net_exit,
};

static __init int sysctl_core_init(void)
{
        register_net_sysctl(&init_net, "net/core", net_core_table);
        return register_pernet_subsys(&sysctl_core_ops);
}

fs_initcall(sysctl_core_init);