<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
  <head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <title>Ganeti 2.3 design &#8212; Ganeti 2.16.0~rc2 documentation</title>
    <link rel="stylesheet" href="_static/style.css" type="text/css" />
    <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
    <script type="text/javascript">
      var DOCUMENTATION_OPTIONS = {
        URL_ROOT:    './',
        VERSION:     '2.16.0~rc2',
        COLLAPSE_INDEX: false,
        FILE_SUFFIX: '.html',
        HAS_SOURCE:  true,
        SOURCELINK_SUFFIX: '.txt'
      };
    </script>
    <script type="text/javascript" src="_static/jquery.js"></script>
    <script type="text/javascript" src="_static/underscore.js"></script>
    <script type="text/javascript" src="_static/doctools.js"></script>
    <link rel="search" title="Search" href="search.html" />
    <link rel="next" title="Synchronising htools to Ganeti 2.3" href="design-htools-2.3.html" />
    <link rel="prev" title="Ganeti 2.2 design" href="design-2.2.html" /> 
  </head>
  <body>
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <li class="right" style="margin-right: 10px">
          <a href="design-htools-2.3.html" title="Synchronising htools to Ganeti 2.3"
             accesskey="N">next</a></li>
        <li class="right" >
          <a href="design-2.2.html" title="Ganeti 2.2 design"
             accesskey="P">previous</a> |</li>
        <li class="nav-item nav-item-0"><a href="index.html">Ganeti 2.16.0~rc2 documentation</a> &#187;</li> 
      </ul>
    </div>  

    <div class="document">
      <div class="documentwrapper">
        <div class="bodywrapper">
          <div class="body" role="main">
            
  <div class="section" id="ganeti-2-3-design">
<h1><a class="toc-backref" href="#id6">Ganeti 2.3 design</a><a class="headerlink" href="#ganeti-2-3-design" title="Permalink to this headline"></a></h1>
<p>This document describes the major changes in Ganeti 2.3 compared to
the 2.2 version.</p>
<div class="contents topic" id="contents">
<p class="topic-title first">Contents</p>
<ul class="simple">
<li><a class="reference internal" href="#ganeti-2-3-design" id="id6">Ganeti 2.3 design</a><ul>
<li><a class="reference internal" href="#core-changes" id="id7">Core changes</a><ul>
<li><a class="reference internal" href="#node-groups" id="id8">Node Groups</a><ul>
<li><a class="reference internal" href="#current-state-and-shortcomings" id="id9">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#proposed-changes" id="id10">Proposed changes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#scalability-issues-with-big-clusters" id="id11">Scalability issues with big clusters</a><ul>
<li><a class="reference internal" href="#current-and-future-issues" id="id12">Current and future issues</a></li>
<li><a class="reference internal" href="#cluster-state-cache" id="id13">Cluster state cache</a></li>
<li><a class="reference internal" href="#watcher-operation" id="id14">Watcher operation</a></li>
<li><a class="reference internal" href="#capacity-calculations" id="id15">Capacity calculations</a></li>
</ul>
</li>
<li><a class="reference internal" href="#node-flags" id="id16">Node flags</a><ul>
<li><a class="reference internal" href="#id1" id="id17">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#id2" id="id18">Proposed changes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#job-priorities" id="id19">Job priorities</a><ul>
<li><a class="reference internal" href="#id3" id="id20">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#id4" id="id21">Proposed changes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#ipv6-support" id="id22">IPv6 support</a><ul>
<li><a class="reference internal" href="#supported-ipv6-setup" id="id23">Supported IPv6 setup</a></li>
<li><a class="reference internal" href="#netutils-utilities-for-handling-common-network-tasks" id="id24">netutils: Utilities for handling common network tasks</a></li>
<li><a class="reference internal" href="#cluster-initialization" id="id25">Cluster initialization</a></li>
<li><a class="reference internal" href="#node-addition" id="id26">Node addition</a></li>
<li><a class="reference internal" href="#name-resolution" id="id27">Name resolution</a></li>
<li><a class="reference internal" href="#ipv4-only-components" id="id28">IPv4-only components</a></li>
</ul>
</li>
<li><a class="reference internal" href="#privilege-separation" id="id29">Privilege Separation</a><ul>
<li><a class="reference internal" href="#id5" id="id30">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#implementation" id="id31">Implementation</a></li>
<li><a class="reference internal" href="#security-domains" id="id32">Security Domains</a></li>
<li><a class="reference internal" href="#restricted-commands" id="id33">Restricted commands</a></li>
<li><a class="reference internal" href="#directory-structure-and-permissions" id="id34">Directory structure and permissions</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#feature-changes" id="id35">Feature changes</a></li>
<li><a class="reference internal" href="#external-interface-changes" id="id36">External interface changes</a></li>
</ul>
</li>
</ul>
</div>
<p>As for 2.1 and 2.2, we divide the 2.3 design into three areas:</p>
<ul class="simple">
<li>core changes, which affect the master daemon/job queue/locking or
all/most logical units</li>
<li>logical unit/feature changes</li>
<li>external interface changes (e.g. command line, OS API, hooks, …)</li>
</ul>
<div class="section" id="core-changes">
<h2><a class="toc-backref" href="#id7">Core changes</a><a class="headerlink" href="#core-changes" title="Permalink to this headline"></a></h2>
<div class="section" id="node-groups">
<h3><a class="toc-backref" href="#id8">Node Groups</a><a class="headerlink" href="#node-groups" title="Permalink to this headline"></a></h3>
<div class="section" id="current-state-and-shortcomings">
<h4><a class="toc-backref" href="#id9">Current state and shortcomings</a><a class="headerlink" href="#current-state-and-shortcomings" title="Permalink to this headline"></a></h4>
<p>Currently all nodes of a Ganeti cluster are considered part of the
same pool for allocation purposes: DRBD instances, for example, can be
allocated on any two nodes.</p>
<p>This causes a problem in cases where nodes are not all equally
connected to each other. For example, if a cluster is created over two
sets of machines, each connected to its own switch, the internal bandwidth
between machines connected to the same switch might be higher than the
bandwidth for inter-switch connections.</p>
<p>Moreover, some operations inside a cluster require all nodes to be locked
together for inter-node consistency, and won’t scale if we increase the
number of nodes to a few hundred.</p>
</div>
<div class="section" id="proposed-changes">
<h4><a class="toc-backref" href="#id10">Proposed changes</a><a class="headerlink" href="#proposed-changes" title="Permalink to this headline"></a></h4>
<p>With this change we’ll divide Ganeti nodes into groups. Nothing will
change for clusters with only one node group. Bigger clusters will be
able to have more than one group, and each node will belong to exactly
one.</p>
<div class="section" id="node-group-management">
<h5>Node group management<a class="headerlink" href="#node-group-management" title="Permalink to this headline"></a></h5>
<p>To manage node groups and the nodes belonging to them, the following new
commands and flags will be introduced:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">gnt</span><span class="o">-</span><span class="n">group</span> <span class="n">add</span> <span class="o">&lt;</span><span class="n">group</span><span class="o">&gt;</span> <span class="c1"># add a new node group</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">group</span> <span class="n">remove</span> <span class="o">&lt;</span><span class="n">group</span><span class="o">&gt;</span> <span class="c1"># delete an empty node group</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">group</span> <span class="nb">list</span> <span class="c1"># list node groups</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">group</span> <span class="n">rename</span> <span class="o">&lt;</span><span class="n">oldname</span><span class="o">&gt;</span> <span class="o">&lt;</span><span class="n">newname</span><span class="o">&gt;</span> <span class="c1"># rename a node group</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">node</span> <span class="p">{</span><span class="nb">list</span><span class="p">,</span><span class="n">info</span><span class="p">}</span> <span class="o">-</span><span class="n">g</span> <span class="o">&lt;</span><span class="n">group</span><span class="o">&gt;</span> <span class="c1"># list only nodes belonging to a node group</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">node</span> <span class="n">modify</span> <span class="o">-</span><span class="n">g</span> <span class="o">&lt;</span><span class="n">group</span><span class="o">&gt;</span> <span class="c1"># assign a node to a node group</span>
</pre></div>
</div>
</div>
<div class="section" id="node-group-attributes">
<h5>Node group attributes<a class="headerlink" href="#node-group-attributes" title="Permalink to this headline"></a></h5>
<p>In clusters with more than one node group, it may be desirable to
establish local policies regarding which groups should be preferred when
performing allocation of new instances, or inter-group instance migrations.</p>
<p>To help with this, we will provide an <code class="docutils literal"><span class="pre">alloc_policy</span></code> attribute for
node groups. This attribute will be honored by iallocator plugins when
making automatic decisions regarding instance placement.</p>
<p>The <code class="docutils literal"><span class="pre">alloc_policy</span></code> attribute can have the following values:</p>
<ul class="simple">
<li>unallocable: the node group should not be a candidate for instance
allocations, and the operation should fail if only groups in this
state could be found that would satisfy the requirements.</li>
<li>last_resort: the node group should not be used for instance
allocations, unless this would be the only way to have the operation
succeed. Prioritization among groups in this state will be deferred to
the iallocator plugin that’s being used.</li>
<li>preferred: the node group can be used freely for allocation of
instances (this is the default state for newly created node
groups). Note that prioritization among groups in this state will be
deferred to the iallocator plugin that’s being used.</li>
</ul>
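<p>For illustration only, managing this attribute could look like the
following (the exact flag name is an assumption at design time):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-group add --alloc-policy=last_resort &lt;group&gt;   # create with a policy
gnt-group modify --alloc-policy=preferred &lt;group&gt;  # change it later
</pre></div>
</div>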
</div>
<div class="section" id="node-group-operations">
<h5>Node group operations<a class="headerlink" href="#node-group-operations" title="Permalink to this headline"></a></h5>
<p>One operation at the node group level will initially be provided:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">gnt</span><span class="o">-</span><span class="n">group</span> <span class="n">drain</span> <span class="o">&lt;</span><span class="n">group</span><span class="o">&gt;</span>
</pre></div>
</div>
<p>The purpose of this operation is to migrate all instances in a given
node group to other groups in the cluster, e.g. to reclaim capacity if
there are enough free resources in other node groups that share a
storage pool with the evacuated group.</p>
</div>
<div class="section" id="instance-level-changes">
<h5>Instance level changes<a class="headerlink" href="#instance-level-changes" title="Permalink to this headline"></a></h5>
<p>With the introduction of node groups, instances will be required to live
in only one group at a time; this is mostly important for DRBD
instances, which will not be allowed to have their primary and secondary
nodes in different node groups. To support this, we envision the
following changes:</p>
<blockquote>
<div><ul class="simple">
<li>The iallocator interface will be augmented, and node groups exposed,
so that plugins will be able to make a decision regarding the group
in which to place a new instance. By default, all node groups will
be considered, but it will be possible to include a list of groups
in the creation job, in which case the plugin will limit itself to
considering those; in both cases, the <code class="docutils literal"><span class="pre">alloc_policy</span></code> attribute
will be honored.</li>
<li>If, on the other hand, primary and secondary nodes are specified
for a new instance, they will be required to be in the same node
group.</li>
<li>Moving an instance between groups can only happen via an explicit
operation, which for example in the case of DRBD will work by
performing internally a replace-disks, a migration, and a second
replace-disks. It will be possible to clean up an interrupted
group-move operation.</li>
<li>Cluster verify will signal an error if an instance has nodes
belonging to different groups. Additionally, changing the group of a
given node will initially only be allowed if the node is empty, as a
straightforward mechanism to avoid creating such a situation.</li>
<li>Inter-group instance migration will have the same operation modes as
new instance allocation, defined above: letting an iallocator plugin
decide the target group, possibly restricting the set of node groups
to consider, or specifying target primary and secondary nodes. In
both cases, the target group or nodes must be able to accept the
instance network- and storage-wise; the operation will fail
otherwise, though in the future we may be able to allow some
parameter to be changed together with the move (in the meantime, an
import/export will be required in this scenario).</li>
</ul>
</div></blockquote>
</div>
<div class="section" id="internal-changes">
<h5>Internal changes<a class="headerlink" href="#internal-changes" title="Permalink to this headline"></a></h5>
<p>We expect the following changes for cluster management:</p>
<blockquote>
<div><ul class="simple">
<li>Frequent multinode operations, such as os-diagnose or cluster-verify,
will act on one group at a time, which will have to be specified in
all cases, except for clusters with just one group. Command line
tools will also have a way to easily target all groups, by
generating one job per group.</li>
<li>Groups will have a human-readable name, but will internally always
be referenced by a UUID, which will be immutable; for example, nodes
will contain the UUID of the group they belong to. This is done
to simplify referencing while keeping it easy to handle renames and
movements. If we see that this works well, we’ll transition other
config objects (instances, nodes) to the same model.</li>
<li>The addition of a new per-group lock will be evaluated, to see if we
can transition some operations now requiring the BGL to it.</li>
<li>Master candidate status will be allowed to be spread among groups.
For the first version we won’t add any restriction over how this is
done, although in the future we may have a minimum number of master
candidates which Ganeti will try to keep in each group, for example.</li>
</ul>
</div></blockquote>
</div>
<div class="section" id="other-work-and-future-changes">
<h5>Other work and future changes<a class="headerlink" href="#other-work-and-future-changes" title="Permalink to this headline"></a></h5>
<p>Commands like <code class="docutils literal"><span class="pre">gnt-cluster</span> <span class="pre">command</span></code>/<code class="docutils literal"><span class="pre">gnt-cluster</span> <span class="pre">copyfile</span></code> will
continue to work on the whole cluster, but it will be possible to target
a single group by specifying it.</p>
<p>Commands which allow selection of sets of resources (for example
<code class="docutils literal"><span class="pre">gnt-instance</span> <span class="pre">start</span></code>/<code class="docutils literal"><span class="pre">gnt-instance</span> <span class="pre">stop</span></code>) will be able to select
them by node group as well.</p>
<p>Initially node groups won’t be taggable objects, to simplify the first
implementation, but we expect this to be easy to add in a future version
should we see it’s useful.</p>
<p>We envision groups as a good place to enhance cluster scalability. In
the future we may want to use them as units for configuration diffusion,
to allow better master scalability. For example, it could be possible
to change some all-nodes RPCs to contact each group once, from the
master, and make one node in the group perform internal diffusion. We
won’t implement this in the first version, but we’ll evaluate it for the
future, if we see scalability problems on big multi-group clusters.</p>
<p>When Ganeti supports more storage models (e.g. SANs, Sheepdog, Ceph)
we expect groups to be the basis for this, allowing for example a
different Sheepdog/Ceph cluster, or a different SAN, to be connected to
each group. In some cases this will mean that inter-group move operations
will necessarily be performed with instance downtime, unless the
hypervisor has block-migrate functionality, and we implement support for
it (this would be theoretically possible today with KVM, for example).</p>
</div>
</div>
</div>
<div class="section" id="scalability-issues-with-big-clusters">
<h3><a class="toc-backref" href="#id11">Scalability issues with big clusters</a><a class="headerlink" href="#scalability-issues-with-big-clusters" title="Permalink to this headline"></a></h3>
<div class="section" id="current-and-future-issues">
<h4><a class="toc-backref" href="#id12">Current and future issues</a><a class="headerlink" href="#current-and-future-issues" title="Permalink to this headline"></a></h4>
<p>Assuming the node groups feature will enable bigger clusters, other
parts of Ganeti will be impacted even more by the (in effect) bigger
clusters.</p>
<p>While many areas will be impacted, one is the most important: the fact
that the watcher still needs to be able to repair instance data on the
current five-minute time-frame (a shorter time-frame would be even
better). This means that the watcher itself needs parallelism
when dealing with node groups.</p>
<p>Also, the iallocator plugins are being fed data from Ganeti, but also
need access to the full cluster state, and in general we still rely on
being able to compute the full cluster state somewhat “cheaply” and
on-demand. This conflicts with the goal of disconnecting the different
node groups, and of keeping the same parallelism while growing the cluster
size.</p>
<p>Another issue is that the current capacity calculations are done
completely outside Ganeti (and they need access to the entire cluster
state), and this prevents keeping the capacity numbers in sync with the
cluster state. While this is still acceptable for smaller clusters, where
a small number of allocations/removals are presumed to occur between two
periodic capacity calculations, on bigger clusters, where we aim to
parallelize heavily between node groups, this is no longer true.</p>
<p>The main proposed change is introducing a cluster state
cache (not serialised to disk), and updating many of the LUs and
cluster operations to account for it. Furthermore, the capacity
calculations will be integrated via a new OpCode/LU, so that we have
faster feedback (instead of periodic computation).</p>
</div>
<div class="section" id="cluster-state-cache">
<h4><a class="toc-backref" href="#id13">Cluster state cache</a><a class="headerlink" href="#cluster-state-cache" title="Permalink to this headline"></a></h4>
<p>A new cluster state cache will be introduced. The cache relies on two
main ideas:</p>
<ul class="simple">
<li>the total node memory and CPU count change very seldom; the total
node disk space is also slow-changing, but can change at runtime; the
free memory and free disk will change significantly for some jobs, but
on a short timescale; in general, these values will be mostly “constant”
during the lifetime of a job</li>
<li>we already have a periodic set of jobs that query the node and
instance state, driven by the <strong class="command">ganeti-watcher</strong> command, and
we’re just discarding the results after acting on them</li>
</ul>
<p>Given the above, it makes sense to cache the results of node and instance
state queries (with a focus on the node state) inside the master daemon.</p>
<p>The cache will not be serialised to disk, and will be for the most part
transparent to the outside of the master daemon.</p>
<div class="section" id="cache-structure">
<h5>Cache structure<a class="headerlink" href="#cache-structure" title="Permalink to this headline"></a></h5>
<p>The cache will be oriented with a focus on node groups, so that it will
be easy to invalidate an entire node group, or a subset of nodes, or the
entire cache. The instances will be stored in the node group of their
primary node.</p>
<p>Furthermore, since the node and instance properties determine the
capacity statistics in a deterministic way, the cache will also hold, at
each node group level, the total capacity as determined by the new
capacity iallocator mode.</p>
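<p>A minimal sketch of the resulting in-memory layout, assuming plain
Python dictionaries keyed by node group UUID (class and attribute names
here are illustrative, not actual Ganeti identifiers):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span># Illustrative only: one cache entry per node group, holding node and
# instance state plus the capacity numbers derived from them.
class _GroupCache(object):
  def __init__(self):
    self.nodes = {}       # node name -&gt; node state (memory, disk, ...)
    self.instances = {}   # instance name -&gt; instance state
    self.capacity = None  # tiered-spec result for this group, or None

class _ClusterStateCache(object):
  def __init__(self):
    self._groups = {}     # node group UUID -&gt; _GroupCache

  def InvalidateGroup(self, group_uuid):
    """Drop all cached data for one node group."""
    self._groups.pop(group_uuid, None)
</pre></div>
</div>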
</div>
<div class="section" id="cache-updates">
<h5>Cache updates<a class="headerlink" href="#cache-updates" title="Permalink to this headline"></a></h5>
<p>The cache will be updated whenever a query for a node state returns
“full” node information (so as to keep the cache state for a given node
consistent). Partial results will not update the cache (see next
paragraph).</p>
<p>Since there will be no way to feed the cache from outside, and we
would like to have a consistent cache view when driven by the watcher,
we’ll introduce a new OpCode/LU for the watcher to run, instead of the
current separate opcodes (see below in the watcher section).</p>
<p>Updates that change a node’s specs “downward” (e.g. less
memory) will invalidate the capacity data. Updates that increase the
node’s specs will not invalidate the capacity, as we’re more interested in “at
least available” correctness, not “at most available”.</p>
</div>
<div class="section" id="cache-invalidation">
<h5>Cache invalidation<a class="headerlink" href="#cache-invalidation" title="Permalink to this headline"></a></h5>
<p>If a partial node query is done (e.g. just for the node free space), and
the returned values don’t match with the cache, then the entire node
state will be invalidated.</p>
<p>By default, all LUs will invalidate the caches for all nodes and
instances they lock. If an LU uses the BGL, then it will invalidate the
entire cache. In time, it is expected that LUs will be modified to not
invalidate, if they are not expected to change the node’s and/or
instance’s state (e.g. <code class="docutils literal"><span class="pre">LUInstanceConsole</span></code>, or
<code class="docutils literal"><span class="pre">LUInstanceActivateDisks</span></code>).</p>
<p>Invalidation of a node’s properties will also invalidate the capacity
data associated with that node.</p>
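<p>As a sketch of this default behaviour (the helper and the attributes
exposing an LU’s owned locks are assumptions, not existing interfaces):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span># Illustrative: invalidate cached state for whatever an LU locked.
def InvalidateAfterLU(cache, lu):
  if lu.used_bgl:                          # assumed attribute
    cache.InvalidateAll()                  # BGL means the whole cache goes
    return
  for node_name in lu.owned_node_locks:    # assumed attribute
    # dropping a node also drops its group's capacity data
    cache.InvalidateNode(node_name)
  for inst_name in lu.owned_instance_locks:
    cache.InvalidateInstance(inst_name)
</pre></div>
</div>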
</div>
<div class="section" id="cache-lifetime">
<h5>Cache lifetime<a class="headerlink" href="#cache-lifetime" title="Permalink to this headline"></a></h5>
<p>The cache elements will have an upper bound on their lifetime; the
proposal is to make this an hour, which should be a high enough value to
cover the watcher being blocked by a medium-term job (e.g. 20-30
minutes).</p>
</div>
<div class="section" id="cache-usage">
<h5>Cache usage<a class="headerlink" href="#cache-usage" title="Permalink to this headline"></a></h5>
<p>The cache will be used by default for most queries (e.g. a Luxi call,
without locks, for the entire cluster). Since this will be a change from
the current behaviour, we’ll need to allow non-cached responses,
e.g. via a <code class="docutils literal"><span class="pre">--cache=off</span></code> or similar argument (which will force the
query).</p>
<p>The cache will also be used for the iallocator runs, so that computing
an allocation solution can proceed independently from other jobs which lock
parts of the cluster. This is important as we need to separate
allocation on one group from exclusive blocking jobs on other node
groups.</p>
<p>The capacity calculations will also use the cache. This is detailed in
the respective sections.</p>
</div>
</div>
<div class="section" id="watcher-operation">
<h4><a class="toc-backref" href="#id14">Watcher operation</a><a class="headerlink" href="#watcher-operation" title="Permalink to this headline"></a></h4>
<p>As detailed in the cluster cache section, the watcher also needs
improvements in order to scale with the cluster size.</p>
<p>As a first improvement, the proposal is to introduce a new OpCode/LU
pair that runs with locks held over the entire query sequence (the
current watcher runs a job with two opcodes, which grab and release the
locks individually). The new opcode will be called
<code class="docutils literal"><span class="pre">OpUpdateNodeGroupCache</span></code> and will do the following:</p>
<ul class="simple">
<li>try to acquire all node/instance locks (to examine in more depth, and
possibly alter) in the given node group</li>
<li>invalidate the cache for the node group</li>
<li>acquire node and instance state (possibly via a new single RPC call
that combines node and instance information)</li>
<li>update cache</li>
<li>return the needed data</li>
</ul>
<p>The reason for the per-node group query is that we don’t want a busy
node group to prevent instance maintenance in other node
groups. Therefore, the watcher will introduce parallelism across node
groups, and it will be possible to have overlapping watcher runs. The new
execution sequence will be:</p>
<ul class="simple">
<li>the parent watcher process acquires global watcher lock</li>
<li>query the list of node groups (lockless or very short locks only)</li>
<li>fork N children, one for each node group</li>
<li>release the global lock</li>
<li>poll/wait for the children to finish</li>
</ul>
<p>Each forked child will do the following:</p>
<ul class="simple">
<li>try to acquire the per-node group watcher lock</li>
<li>if it fails to acquire the lock, exit with a special code telling the
parent that the node group is already being managed by a watcher process</li>
<li>otherwise, submit an OpUpdateNodeGroupCache job</li>
<li>get results (possibly after a long time, due to busy group)</li>
<li>run the needed maintenance operations for the current group</li>
</ul>
<p>This new mode of execution means that the master watcher processes might
overlap in running, but not the individual per-node group child
processes.</p>
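<p>A condensed sketch of this parent/child flow (lock helpers, job
submission and the exit code are assumed names; error handling omitted):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import os
import sys

def RunWatcher():
  with GlobalWatcherLock():          # assumed file-lock helper
    groups = QueryNodeGroups()       # lockless or short locks only
    pids = [ForkChild(ChildMain, g) for g in groups]
  for pid in pids:                   # global lock already released here
    os.waitpid(pid, 0)

def ChildMain(group_uuid):
  if not TryAcquireGroupWatcherLock(group_uuid):
    sys.exit(EXIT_GROUP_BUSY)        # another watcher owns this group
  result = SubmitJob([OpUpdateNodeGroupCache(group_uuid=group_uuid)])
  RunGroupMaintenance(group_uuid, result)
</pre></div>
</div>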
<p>This change allows us to keep (almost) the same parallelism when using a
bigger cluster with node groups versus two separate clusters.</p>
<div class="section" id="cost-of-periodic-cache-updating">
<h5>Cost of periodic cache updating<a class="headerlink" href="#cost-of-periodic-cache-updating" title="Permalink to this headline"></a></h5>
<p>Currently the watcher only does “small” queries for the node and
instance state, and at first sight changing it to use the new OpCode
which populates the cache with the entire state might introduce
additional costs, which must be paid every five minutes.</p>
<p>However, the OpCodes that the watcher submits use the so-called
dynamic fields (which require contacting the remote nodes), and the LUs are not
selective—they always grab all the node and instance state. So in the
end, we have the same cost; it just becomes explicit rather than
implicit.</p>
<p>This ‘grab all node state’ behaviour is what makes the cache worth
implementing.</p>
</div>
<div class="section" id="intra-node-group-scalability">
<h5>Intra-node group scalability<a class="headerlink" href="#intra-node-group-scalability" title="Permalink to this headline"></a></h5>
<p>The design above only deals with inter-node group issues. It still makes
sense to run instance maintenance for nodes A and B if only node C is
locked (all being in the same node group).</p>
<p>This problem was also encountered in previous Ganeti versions, and it
should be handled similarly, by tweaking lock lifetime in long-duration
jobs.</p>
<p>TODO: add more ideas here.</p>
</div>
<div class="section" id="state-file-maintenance">
<h5>State file maintenance<a class="headerlink" href="#state-file-maintenance" title="Permalink to this headline"></a></h5>
<p>The splitting of node group maintenance to different children which will
run in parallel requires that the state file handling changes from
monolithic updates to partial ones.</p>
<p>There are two files that the watcher maintains:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">$LOCALSTATEDIR/lib/ganeti/watcher.data</span></code>, its internal state file,
used for deciding internal actions</li>
<li><code class="docutils literal"><span class="pre">$LOCALSTATEDIR/run/ganeti/instance-status</span></code>, a file designed for
external consumption</li>
</ul>
<p>For the first file, since it’s used only internally by the watchers, we
can move to a per-node-group file.</p>
<p>For the second file, even if it’s used as an external interface, we will
need to make some changes to it: because the different node groups can
return results at different times, we need to either split the file into
per-group files or keep the single file and add a per-instance timestamp
(currently the file holds only the instance name and state).</p>
<p>The proposal is that each child process maintains its own node group
file, and the master process will, right after querying the node group
list, delete any extra per-node group state file. This leaves the
consumers to run a simple <code class="docutils literal"><span class="pre">cat</span> <span class="pre">instance-status.group-*</span></code> to obtain the
entire list of instances and their states. If needed, the modification
timestamp of each file can be used to determine the age of the results.</p>
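<p>A consumer wanting both the merged state and its age could do
something like the following (the run directory shown is an assumed
expansion of <code class="docutils literal"><span class="pre">$LOCALSTATEDIR</span></code>):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import glob
import os
import time

def ReadInstanceStatus(rundir="/var/run/ganeti"):
  """Merge per-group files; return ({instance: state}, oldest mtime)."""
  status, oldest = {}, time.time()
  for path in glob.glob(os.path.join(rundir, "instance-status.group-*")):
    oldest = min(oldest, os.stat(path).st_mtime)
    for line in open(path):
      name, state = line.split(None, 1)
      status[name] = state.strip()
  return status, oldest
</pre></div>
</div>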
</div>
</div>
<div class="section" id="capacity-calculations">
<h4><a class="toc-backref" href="#id15">Capacity calculations</a><a class="headerlink" href="#capacity-calculations" title="Permalink to this headline"></a></h4>
<p>Currently, the capacity calculations are done completely outside
Ganeti. As explained in the current problems section, this needs to
account better for the cluster state changes.</p>
<p>Therefore a new OpCode will be introduced, <code class="docutils literal"><span class="pre">OpComputeCapacity</span></code>, that
will either return the current capacity numbers (if available), or
trigger a new capacity calculation, via the iallocator framework, which
will get a new method called <code class="docutils literal"><span class="pre">capacity</span></code>.</p>
<p>This method will feed the cluster state (for the complete set of node
groups, or alternatively just a subset) to the iallocator plugin (either
the specified one, or the default if none is specified), and return the
new capacity in the format currently exported by the htools suite and
known as the “tiered specs” (see <em class="manpage">hspace(1)</em>).</p>
<div class="section" id="tspec-cluster-parameters">
<h5>tspec cluster parameters<a class="headerlink" href="#tspec-cluster-parameters" title="Permalink to this headline"></a></h5>
<p>Currently, the “tspec” calculations done in <strong class="command">hspace</strong> require
some additional parameters:</p>
<ul class="simple">
<li>maximum instance size</li>
<li>type of instance storage</li>
<li>maximum ratio of virtual CPUs per physical CPU</li>
<li>minimum disk free</li>
</ul>
<p>For the integration in Ganeti, there are multiple ways to pass these:</p>
<ul class="simple">
<li>ignored by Ganeti, leaving it to the iallocator plugin whether to
use them at all</li>
<li>as input to the opcode</li>
<li>as proper cluster parameters</li>
</ul>
<p>Since the first option is not consistent with the intended changes, a
combination of the last two is proposed:</p>
<ul class="simple">
<li>at cluster level, we’ll have cluster-wide defaults</li>
<li>at node group level, we’ll allow overriding the cluster defaults</li>
<li>and if they are passed in via the opcode, they will override both for
the current computation</li>
</ul>
<p>Whenever the capacity is requested via different parameters, it will
invalidate the cache, even if otherwise the cache is up-to-date.</p>
<p>The new parameters are:</p>
<ul class="simple">
<li>max_inst_spec: (int, int, int), the maximum instance specification
accepted by this cluster or node group, in the order of memory, disk,
vcpus;</li>
<li>default_template: string, the default disk template to use</li>
<li>max_cpu_ratio: double, the maximum ratio of VCPUs/PCPUs</li>
<li>max_disk_usage: double, the maximum disk usage (as a ratio)</li>
</ul>
<p>These might also be used in instance creations (to be determined later,
after they are introduced).</p>
</div>
<div class="section" id="opcode-details">
<h5>OpCode details<a class="headerlink" href="#opcode-details" title="Permalink to this headline"></a></h5>
<p>Input:</p>
<ul class="simple">
<li>iallocator: string (optional, otherwise uses the cluster default)</li>
<li>cached: boolean, optional, defaults to true, and denotes whether we
accept cached responses</li>
<li>the above new parameters, optional; if they are passed, they will
override all node groups’ parameters</li>
</ul>
<p>Output:</p>
<ul class="simple">
<li>cluster: list of tuples (memory, disk, vcpu, count), in decreasing
order of specifications; the first three members represent the
instance specification, the last one the count of how many instances
of this specification can be created on the cluster</li>
<li>node_groups: a dictionary keyed by node group UUID, with values a
dictionary:<ul>
<li>tspecs: a list like the cluster one</li>
<li>additionally, the new cluster parameters, denoting the input
parameters that were used for this node group</li>
</ul>
</li>
<li>ctime: the date the result has been computed; this represents the
oldest creation time amongst all node groups (so as to accurately
represent how much out-of-date the global response is)</li>
</ul>
<p>Note that due to the way the tspecs are computed, for any given
specification, the total available count is the count for the given
entry, plus the sum of counts for higher specifications.</p>
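<p>A small worked example of this cumulative property (the numbers are
made up):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span># (memory, disk, vcpus, count), in decreasing order of specifications
tspecs = [(1024, 102400, 2, 4),   # 4 instances of the biggest spec fit
          (512, 51200, 1, 6)]     # plus 6 more of this smaller spec

def TotalFor(tspecs, index):
  """Instances of spec ``index`` placeable: its count plus all above."""
  return sum(count for (_, _, _, count) in tspecs[:index + 1])

assert TotalFor(tspecs, 1) == 10  # 6 + 4
</pre></div>
</div>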
</div>
</div>
</div>
<div class="section" id="node-flags">
<h3><a class="toc-backref" href="#id16">Node flags</a><a class="headerlink" href="#node-flags" title="Permalink to this headline"></a></h3>
<div class="section" id="id1">
<h4><a class="toc-backref" href="#id17">Current state and shortcomings</a><a class="headerlink" href="#id1" title="Permalink to this headline"></a></h4>
<p>Currently all nodes are, from the point of view of their capabilities,
homogeneous. This means the cluster considers all nodes capable of
becoming master candidates, and of hosting instances.</p>
<p>This prevents some deployment scenarios: e.g. having a Ganeti instance
(in another cluster) be just a master candidate, in case all other
master candidates go down, without, of course, hosting instances; or
having a node in a remote location just host instances but not become
master, etc.</p>
</div>
<div class="section" id="id2">
<h4><a class="toc-backref" href="#id18">Proposed changes</a><a class="headerlink" href="#id2" title="Permalink to this headline"></a></h4>
<p>Two new capability flags will be added to the node:</p>
<ul class="simple">
<li>master_capable, denoting whether the node can become a master
candidate or master</li>
<li>vm_capable, denoting whether the node can host instances</li>
</ul>
<p>In terms of the other flags, “not master_capable” is a stronger version of
“not master candidate”, and “not vm_capable” is a stronger version of
“drained”.</p>
<p>The master_capable flag will affect the auto-promotion code and node
modifications.</p>
<p>The vm_capable flag will affect the iallocator protocol, capacity
calculations, node checks in cluster verify, and will interact in novel
ways with locking (unfortunately).</p>
<p>It is envisaged that most nodes will be both vm_capable and
master_capable, and just a few will have one of these flags
removed. Ganeti itself will allow clearing of both flags, even though
this doesn’t make much sense currently.</p>
</div>
</div>
<div class="section" id="job-priorities">
<span id="jqueue-job-priority-design"></span><h3><a class="toc-backref" href="#id19">Job priorities</a><a class="headerlink" href="#job-priorities" title="Permalink to this headline"></a></h3>
<div class="section" id="id3">
<h4><a class="toc-backref" href="#id20">Current state and shortcomings</a><a class="headerlink" href="#id3" title="Permalink to this headline"></a></h4>
<p>Currently all jobs and opcodes have the same priority. Once a job has
started executing, its thread won’t be released until all of its opcodes
have acquired their locks and done their work. When a job is finished, the
next job is selected strictly by its incoming order. This does not mean
jobs are run in their incoming order—locks and other delays can cause them
to be stalled for some time.</p>
<p>In some situations, e.g. an emergency shutdown, one may want to run a
job as soon as possible. This is not possible currently if there are
pending jobs in the queue.</p>
</div>
<div class="section" id="id4">
<h4><a class="toc-backref" href="#id21">Proposed changes</a><a class="headerlink" href="#id4" title="Permalink to this headline"></a></h4>
<p>Each opcode will be assigned a priority on submission. Opcode priorities
are integers and the lower the number, the higher the opcode’s priority
is. Within the same priority, jobs and opcodes are initially processed
in their incoming order.</p>
<p>Submitted opcodes can have one of the priorities listed below. Other
priorities are reserved for internal use. The absolute range is
-20..+19. Opcodes submitted without a priority (e.g. by older clients)
are assigned the default priority.</p>
<blockquote>
<div><ul class="simple">
<li>High (-10)</li>
<li>Normal (0, default)</li>
<li>Low (+10)</li>
</ul>
</div></blockquote>
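<p>In code this maps to simple integer constants, along the lines of the
following sketch (the exact constant names are an implementation detail):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>OP_PRIO_HIGHEST = -20  # absolute limit; locks acquired in blocking mode
OP_PRIO_HIGH = -10     # submittable
OP_PRIO_NORMAL = 0     # submittable, default
OP_PRIO_LOW = 10       # submittable
OP_PRIO_LOWEST = 19    # absolute limit
OP_PRIO_DEFAULT = OP_PRIO_NORMAL
</pre></div>
</div>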
<p>As a change from the current model where executing a job blocks one
thread for the whole duration, the new job processor must return the job
to the queue after each opcode and also if it can’t get all locks in a
reasonable timeframe. This will allow opcodes of higher priority
submitted in the meantime to be processed or opcodes of the same
priority to try to get their locks. When added to the job queue’s
workerpool, the priority is determined by the first unprocessed opcode
in the job.</p>
<p>If an opcode is deferred, the job will go back to the “queued” status,
even though it’s just waiting to try to acquire its locks again later.</p>
<p>If an opcode cannot be processed after a certain number of retries or a
certain amount of time, it should increase its priority. This will avoid
starvation.</p>
<p>A job’s priority can never go below -20. If a job hits priority -20, it
must acquire its locks in blocking mode.</p>
<p>Opcode priorities are synchronised to disk in order to be restored after
a restart or crash of the master daemon.</p>
<p>Priorities also need to be considered inside the locking library to
ensure opcodes with higher priorities get locks first. See
<a class="reference internal" href="#locking-priorities"><span class="std std-ref">locking priorities</span></a> for more details.</p>
<div class="section" id="worker-pool">
<h5>Worker pool<a class="headerlink" href="#worker-pool" title="Permalink to this headline"></a></h5>
<p>To support job priorities in the job queue, the worker pool underlying
the job queue must be enhanced to support task priorities. Currently
tasks are processed in the order they are added to the queue (but, due
to their nature, they don’t necessarily finish in that order). All tasks
are equal. To support tasks with higher or lower priority, a few changes
have to be made to the queue inside a worker pool.</p>
<p>Each task is assigned a priority when added to the queue. This priority
cannot be changed until the task is executed (this is fine as in all
current use-cases, tasks are added to a pool and then forgotten about
until they’re done).</p>
<p>A task’s priority can be compared to Unix process priorities. The lower
the priority number, the closer to the queue’s front it is. A task with
priority 0 is going to be run before one with priority 10. Tasks with
the same priority are executed in the order in which they were added.</p>
<p>While a task is running it can query its own priority. If it’s not ready
yet for finishing, it can raise an exception to defer itself, optionally
changing its own priority. This is useful for the following cases:</p>
<ul class="simple">
<li>A task is trying to acquire locks, but those locks are still held by
other tasks. By deferring itself, the task gives others a chance to
run. This is especially useful when all workers are busy.</li>
<li>If a task decides it hasn’t gotten its locks in a long time, it can
start to increase its own priority.</li>
<li>Tasks waiting for long-running operations running asynchronously could
defer themselves while waiting.</li>
</ul>
<p>With these changes, the job queue will be able to implement per-job
priorities.</p>
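<p>A minimal sketch of such a priority-aware queue, using Python’s
<code class="docutils literal"><span class="pre">heapq</span></code> with a monotonic counter to keep FIFO order within one
priority (illustrative, not the actual worker pool code):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import heapq
import itertools

class DeferTask(Exception):
  """Raised by a running task to re-queue itself, optionally
  changing its own priority."""
  def __init__(self, priority=None):
    Exception.__init__(self)
    self.priority = priority

class PriorityTaskQueue(object):
  def __init__(self):
    self._heap = []
    self._counter = itertools.count()

  def Add(self, task, priority=0):
    # equal priorities keep insertion order thanks to the counter
    heapq.heappush(self._heap, (priority, next(self._counter), task))

  def Pop(self):
    priority, _, task = heapq.heappop(self._heap)
    return priority, task
</pre></div>
</div>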
</div>
<div class="section" id="locking">
<span id="locking-priorities"></span><h5>Locking<a class="headerlink" href="#locking" title="Permalink to this headline"></a></h5>
<p>In order to support priorities in Ganeti’s own lock classes,
<code class="docutils literal"><span class="pre">locking.SharedLock</span></code> and <code class="docutils literal"><span class="pre">locking.LockSet</span></code>, the internal structure
of the former class needs to be changed. The last major change in this
area was done for Ganeti 2.1 and can be found in the respective
<a class="reference internal" href="design-2.1.html"><span class="doc">design document</span></a>.</p>
<p>The plain list (<code class="docutils literal"><span class="pre">[]</span></code>) used as a queue is replaced by a heap queue,
similar to the <a class="reference internal" href="#worker-pool">worker pool</a>. The heap or priority queue does automatic
sorting, thereby automatically taking care of priorities. For each
priority there’s a plain list with pending acquires, like the single
queue of pending acquires before this change.</p>
<p>When the lock is released, the code locates the list of pending acquires
for the highest priority waiting. The first condition (index 0) is
notified. Once all waiting threads received the notification, the
condition is removed from the list. If the list of conditions is empty
it’s removed from the heap queue.</p>
<p>Like before, shared acquires are grouped and skip ahead of exclusive
acquires if there’s already an existing shared acquire for a priority.
To accomplish this, a separate dictionary of shared acquires per
priority is maintained.</p>
<p>To simplify the code and reduce memory consumption, the concept of the
“active” and “inactive” condition for shared acquires is abolished. The
lock can’t predict what priorities the next acquires will use and even
keeping a cache can become computationally expensive for arguable
benefit (the underlying POSIX pipe, see <code class="docutils literal"><span class="pre">pipe(2)</span></code>, needs to be
re-created for each notification anyway).</p>
<p>The following diagram shows a possible state of the internal queue from
a high-level view. Conditions are shown as (waiting) threads. Assuming
no modifications are made to the queue (e.g. more acquires or timeouts),
the lock would be acquired by the threads in this order (concurrent
acquires in parentheses): <code class="docutils literal"><span class="pre">threadE1</span></code>, <code class="docutils literal"><span class="pre">threadE2</span></code>, (<code class="docutils literal"><span class="pre">threadS1</span></code>,
<code class="docutils literal"><span class="pre">threadS2</span></code>, <code class="docutils literal"><span class="pre">threadS3</span></code>), (<code class="docutils literal"><span class="pre">threadS4</span></code>, <code class="docutils literal"><span class="pre">threadS5</span></code>), <code class="docutils literal"><span class="pre">threadE3</span></code>,
<code class="docutils literal"><span class="pre">threadS6</span></code>, <code class="docutils literal"><span class="pre">threadE4</span></code>, <code class="docutils literal"><span class="pre">threadE5</span></code>.</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="p">[</span>
  <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="p">[</span><span class="n">exc</span><span class="o">/</span><span class="n">threadE1</span><span class="p">,</span> <span class="n">exc</span><span class="o">/</span><span class="n">threadE2</span><span class="p">,</span> <span class="n">shr</span><span class="o">/</span><span class="n">threadS1</span><span class="o">/</span><span class="n">threadS2</span><span class="o">/</span><span class="n">threadS3</span><span class="p">]),</span>
  <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="p">[</span><span class="n">shr</span><span class="o">/</span><span class="n">threadS4</span><span class="o">/</span><span class="n">threadS5</span><span class="p">]),</span>
  <span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="p">[</span><span class="n">exc</span><span class="o">/</span><span class="n">threadE3</span><span class="p">]),</span>
  <span class="p">(</span><span class="mi">33</span><span class="p">,</span> <span class="p">[</span><span class="n">shr</span><span class="o">/</span><span class="n">threadS6</span><span class="p">,</span> <span class="n">exc</span><span class="o">/</span><span class="n">threadE4</span><span class="p">,</span> <span class="n">exc</span><span class="o">/</span><span class="n">threadE5</span><span class="p">]),</span>
<span class="p">]</span>
</pre></div>
</div>
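<p>The release path over this structure could then look roughly as
follows (a sketch only; the caller is assumed to hold the lock’s internal
mutex, and removal of the notified condition is simplified):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import heapq

def _NotifyNextPending(pending):
  """Wake the waiters at the front of the priority heap.

  ``pending`` is a heap of (priority, [condition, ...]) entries, as in
  the diagram above.
  """
  if not pending:
    return
  (_, conditions) = pending[0]   # smallest priority number is in front
  conditions.pop(0).notifyAll()  # first condition, in FIFO order
  if not conditions:
    heapq.heappop(pending)       # nothing left waiting at this priority
</pre></div>
</div>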
</div>
</div>
</div>
<div class="section" id="ipv6-support">
<h3><a class="toc-backref" href="#id22">IPv6 support</a><a class="headerlink" href="#ipv6-support" title="Permalink to this headline"></a></h3>
<p>Currently Ganeti does not support IPv6. This is true for nodes as well
as instances. Because IPv4 address exhaustion is threateningly near, the
need for IPv6 is increasing, especially given that bigger and bigger
clusters are supported.</p>
<div class="section" id="supported-ipv6-setup">
<h4><a class="toc-backref" href="#id23">Supported IPv6 setup</a><a class="headerlink" href="#supported-ipv6-setup" title="Permalink to this headline"></a></h4>
<p>In Ganeti 2.3 we introduce, in addition to the ordinary pure IPv4
setup, a hybrid IPv6/IPv4 mode. The latter works as follows:</p>
<ul class="simple">
<li>all nodes in a cluster have a primary IPv6 address</li>
<li>the master has an IPv6 address</li>
<li>all nodes <strong>must</strong> have a secondary IPv4 address</li>
</ul>
<p>The reason for this hybrid setup is that key components that Ganeti
depends on do not, or only partially, support IPv6. More precisely, Xen
does not support instance migration via IPv6 in versions 3.4 and 4.0.
Similarly, KVM does not support instance migration nor VNC access over
IPv6 at the time of this writing.</p>
<p>This led to the decision of not supporting pure IPv6 Ganeti clusters, as
very important cluster operations would not have been possible. Using
IPv4 as secondary address does not affect any of the goals
of the IPv6 support: since secondary addresses do not need to be
publicly accessible, they need not be globally unique. In other words,
one can practically use private IPv4 secondary addresses just for
intra-cluster communication without propagating them across layer 3
boundaries.</p>
</div>
<div class="section" id="netutils-utilities-for-handling-common-network-tasks">
<h4><a class="toc-backref" href="#id24">netutils: Utilities for handling common network tasks</a><a class="headerlink" href="#netutils-utilities-for-handling-common-network-tasks" title="Permalink to this headline"></a></h4>
<p>Currently common utility functions are kept in the <code class="docutils literal"><span class="pre">utils</span></code> module.
Since this module has grown bigger and bigger, network-related functions are
moved to a separate module named <em>netutils</em>. Additionally, all these
utilities will be IPv6-enabled.</p>
</div>
<div class="section" id="cluster-initialization">
<h4><a class="toc-backref" href="#id25">Cluster initialization</a><a class="headerlink" href="#cluster-initialization" title="Permalink to this headline"></a></h4>
<p>As mentioned above there will be two different setups in terms of IP
addressing: pure IPv4 and hybrid IPv6/IPv4. To choose between them, a new
cluster init parameter <em>--primary-ip-version</em> is introduced. This is
needed as a given name can resolve to both an IPv4 and an IPv6 address on a
dual-stack host, effectively making it impossible to infer that bit.</p>
<p>Once a cluster is initialized and the primary IP version chosen, all
nodes that join have to conform to that setup. In the case of our
IPv6/IPv4 setup all nodes <em>must</em> have a secondary IPv4 address.</p>
<p>Furthermore we store the primary IP version in ssconf, which is consulted
every time a daemon starts to determine the default bind address (either
<em>0.0.0.0</em> or <em>::</em>). In an IPv6/IPv4 setup we need to bind the Ganeti
daemons listening on network sockets to the IPv6 address.</p>
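<p>The bind address selection then reduces to a check of the stored
family, roughly as follows (a sketch; the ssconf accessor is not shown):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import socket

def GetDaemonBindAddress(primary_ip_family):
  """Pick the wildcard bind address from the cluster's IP family."""
  if primary_ip_family == socket.AF_INET6:
    return "::"      # hybrid IPv6/IPv4 cluster: listen on IPv6
  return "0.0.0.0"   # pure IPv4 cluster
</pre></div>
</div>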
</div>
<div class="section" id="node-addition">
<h4><a class="toc-backref" href="#id26">Node addition</a><a class="headerlink" href="#node-addition" title="Permalink to this headline"></a></h4>
<p>When adding a new node to an IPv6/IPv4 cluster, it must have an IPv6
address to be used as primary and an IPv4 address to be used as secondary.
As explained above, every time a daemon is started we use the cluster
primary IP version to determine which address to bind to. The
only exception to this is when a node is added to the cluster. In this
case there is no ssconf available when noded is started, and therefore
the correct address needs to be passed to it.</p>
</div>
<div class="section" id="name-resolution">
<h4><a class="toc-backref" href="#id27">Name resolution</a><a class="headerlink" href="#name-resolution" title="Permalink to this headline"></a></h4>
<p>Since the gethostbyname*() functions do not support IPv6, name resolution
will be done by using the recommended getaddrinfo().</p>
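<p>For example, resolving a node name in a family-agnostic way:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import socket

def ResolveHostname(name, family=socket.AF_UNSPEC):
  """Return the first address resolved for ``name`` (IPv4 or IPv6)."""
  # getaddrinfo(), unlike gethostbyname(), handles both address families
  addrinfo = socket.getaddrinfo(name, None, family, socket.SOCK_STREAM)
  (_family, _type, _proto, _canonname, sockaddr) = addrinfo[0]
  return sockaddr[0]
</pre></div>
</div>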
</div>
<div class="section" id="ipv4-only-components">
<h4><a class="toc-backref" href="#id28">IPv4-only components</a><a class="headerlink" href="#ipv4-only-components" title="Permalink to this headline"></a></h4>
<table border="1" class="docutils">
<colgroup>
<col width="42%" />
<col width="28%" />
<col width="30%" />
</colgroup>
<thead valign="bottom">
<tr class="row-odd"><th class="head">Component</th>
<th class="head">IPv6 Status</th>
<th class="head">Planned Version</th>
</tr>
</thead>
<tbody valign="top">
<tr class="row-even"><td>Xen instance migration</td>
<td>Not supported</td>
<td>Xen 4.1: libxenlight</td>
</tr>
<tr class="row-odd"><td>KVM instance migration</td>
<td>Not supported</td>
<td>Unknown</td>
</tr>
<tr class="row-even"><td>KVM VNC access</td>
<td>Not supported</td>
<td>Unknown</td>
</tr>
</tbody>
</table>
</div>
</div>
<div class="section" id="privilege-separation">
<h3><a class="toc-backref" href="#id29">Privilege Separation</a><a class="headerlink" href="#privilege-separation" title="Permalink to this headline"></a></h3>
<div class="section" id="id5">
<h4><a class="toc-backref" href="#id30">Current state and shortcomings</a><a class="headerlink" href="#id5" title="Permalink to this headline"></a></h4>
<p>In Ganeti 2.2 we introduced privilege separation for the RAPI daemon.
This was done directly in the daemon’s code in the process of
daemonizing itself. Doing so leads to several potential issues. For
example, a file could be opened while the code is still running as
<code class="docutils literal"><span class="pre">root</span></code> and for some reason not be closed again. Even after changing
the user ID, the file descriptor can be written to.</p>
</div>
<div class="section" id="implementation">
<h4><a class="toc-backref" href="#id31">Implementation</a><a class="headerlink" href="#implementation" title="Permalink to this headline"></a></h4>
<p>To address these shortcomings, daemons will be started under the target
user right away. The <code class="docutils literal"><span class="pre">start-stop-daemon</span></code> utility used to start daemons
supports the <code class="docutils literal"><span class="pre">--chuid</span></code> option to change user and group ID before
starting the executable.</p>
<p>The intermediate solution for the RAPI daemon introduced in Ganeti 2.2
will be removed.</p>
<p>Files written by the daemons may need to have an explicit owner and
group set (easily done through <code class="docutils literal"><span class="pre">utils.WriteFile</span></code>).</p>
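<p>In essence this amounts to the following; the sketch below uses only
the standard library to show what setting an explicit owner and group
on a written file involves (the function name and the example accounts
are illustrative):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import grp
import os
import pwd

def WriteOwnedFile(path, data, mode, owner, group):
    """Write 'data' to 'path' with an explicit mode, owner and group."""
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
    try:
        os.fchown(fd, uid, gid)   # set ownership before writing data
        os.fchmod(fd, mode)       # ensure the mode despite the umask
        os.write(fd, data)
    finally:
        os.close(fd)

# e.g. WriteOwnedFile("/var/lib/ganeti/config.data", b"...", 0o640,
#                     "gntmasterd", "gntconfd")
</pre></div>
</div>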
<p>All SSH-related code is removed from the <code class="docutils literal"><span class="pre">ganeti.bootstrap</span></code> module and
core components and moved to a separate script. The core code will
simply assume a working SSH setup is in place.</p>
</div>
<div class="section" id="security-domains">
<h4><a class="toc-backref" href="#id32">Security Domains</a><a class="headerlink" href="#security-domains" title="Permalink to this headline"></a></h4>
<p>In order to separate the permissions of file sets, we group them
into the following three overall security domains:</p>
<ol class="arabic simple">
<li>Public: <code class="docutils literal"><span class="pre">0755</span></code> for directories and <code class="docutils literal"><span class="pre">0644</span></code> for files</li>
<li>Ganeti wide: shared between the daemons (gntdaemons)</li>
<li>Secret files: shared among a specific set of daemons/users</li>
</ol>
<p>For point 3, this table shows the correlation of the sets to groups
and their users:</p>
<table border="1" class="docutils">
<colgroup>
<col width="4%" />
<col width="14%" />
<col width="43%" />
<col width="38%" />
</colgroup>
<thead valign="bottom">
<tr class="row-odd"><th class="head">Set</th>
<th class="head">Group</th>
<th class="head">Users</th>
<th class="head">Description</th>
</tr>
</thead>
<tbody valign="top">
<tr class="row-even"><td>A</td>
<td>gntrapi</td>
<td>gntrapi, gntmasterd</td>
<td>Share data between
gntrapi and gntmasterd</td>
</tr>
<tr class="row-odd"><td>B</td>
<td>gntadmins</td>
<td>gntrapi, gntmasterd, <em>users</em></td>
<td>Shared between users who
need to call gntmasterd</td>
</tr>
<tr class="row-even"><td>C</td>
<td>gntconfd</td>
<td>gntconfd, gntmasterd</td>
<td>Share data between
gntconfd and gntmasterd</td>
</tr>
<tr class="row-odd"><td>D</td>
<td>gntmasterd</td>
<td>gntmasterd</td>
<td>masterd only; currently
used only to redistribute
the configuration; has
access to all files under
<code class="docutils literal"><span class="pre">lib/ganeti</span></code></td>
</tr>
<tr class="row-even"><td>E</td>
<td>gntdaemons</td>
<td>gntmasterd, gntrapi, gntconfd</td>
<td>Shared between the various
Ganeti daemons to exchange
data</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="restricted-commands">
<h4><a class="toc-backref" href="#id33">Restricted commands</a><a class="headerlink" href="#restricted-commands" title="Permalink to this headline"></a></h4>
<p>The following commands still require root permissions to fulfill their
functions:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">gnt</span><span class="o">-</span><span class="n">cluster</span> <span class="p">{</span><span class="n">init</span><span class="o">|</span><span class="n">destroy</span><span class="o">|</span><span class="n">command</span><span class="o">|</span><span class="n">copyfile</span><span class="o">|</span><span class="n">rename</span><span class="o">|</span><span class="n">masterfailover</span><span class="o">|</span><span class="n">renew</span><span class="o">-</span><span class="n">crypto</span><span class="p">}</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">node</span> <span class="p">{</span><span class="n">add</span><span class="o">|</span><span class="n">remove</span><span class="p">}</span>
<span class="n">gnt</span><span class="o">-</span><span class="n">instance</span> <span class="p">{</span><span class="n">console</span><span class="p">}</span>
</pre></div>
</div>
</div>
<div class="section" id="directory-structure-and-permissions">
<h4><a class="toc-backref" href="#id34">Directory structure and permissions</a><a class="headerlink" href="#directory-structure-and-permissions" title="Permalink to this headline"></a></h4>
<p>Here&#8217;s how we propose to change the filesystem hierarchy and its
permissions.</p>
<p>Assuming the defaults are used: <code class="docutils literal"><span class="pre">gnt${daemon}</span></code> for the user and
the groups from the section <a class="reference internal" href="#security-domains">Security Domains</a>:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>${localstatedir}/lib/ganeti/ (0755; gntmasterd:gntmasterd)
   cluster-domain-secret (0600; gntmasterd:gntmasterd)
   config.data (0640; gntmasterd:gntconfd)
   hmac.key (0440; gntmasterd:gntconfd)
   known_host (0644; gntmasterd:gntmasterd)
   queue/ (0700; gntmasterd:gntmasterd)
     archive/ (0700; gntmasterd:gntmasterd)
       * (0600; gntmasterd:gntmasterd)
     * (0600; gntmasterd:gntmasterd)
   rapi.pem (0440; gntrapi:gntrapi)
   rapi_users (0640; gntrapi:gntrapi)
   server.pem (0440; gntmasterd:gntmasterd)
   ssconf_* (0444; root:gntmasterd)
   uidpool/ (0750; root:gntmasterd)
   watcher.data (0600; root:gntmasterd)
${localstatedir}/run/ganeti/ (0770; gntmasterd:gntdaemons)
   socket/ (0750; gntmasterd:gntadmins)
     ganeti-master (0770; gntmasterd:gntadmins)
${localstatedir}/log/ganeti/ (0770; gntmasterd:gntdaemons)
   master-daemon.log (0600; gntmasterd:gntdaemons)
   rapi-daemon.log (0600; gntrapi:gntdaemons)
   conf-daemon.log (0600; gntconfd:gntdaemons)
   node-daemon.log (0600; gntnoded:gntdaemons)
</pre></div>
</div>
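<p>A hedged sketch of how such a scheme could be enforced at startup is
shown below; the helper and the abbreviated specification table are
illustrative, not Ganeti&#8217;s actual implementation:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import grp
import os
import pwd

# (path, mode, owner, group), following the hierarchy above; only a
# few representative entries are listed.
_PERMS = [
    ("/var/lib/ganeti", 0o755, "gntmasterd", "gntmasterd"),
    ("/var/lib/ganeti/config.data", 0o640, "gntmasterd", "gntconfd"),
    ("/var/run/ganeti", 0o770, "gntmasterd", "gntdaemons"),
    ("/var/log/ganeti", 0o770, "gntmasterd", "gntdaemons"),
]

def EnsurePermissions(specs):
    for (path, mode, owner, group) in specs:
        uid = pwd.getpwnam(owner).pw_uid
        gid = grp.getgrnam(group).gr_gid
        os.chown(path, uid, gid)
        os.chmod(path, mode)

# EnsurePermissions(_PERMS)  # requires root and the gnt* accounts
</pre></div>
</div>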
</div>
</div>
</div>
<div class="section" id="feature-changes">
<h2><a class="toc-backref" href="#id35">Feature changes</a><a class="headerlink" href="#feature-changes" title="Permalink to this headline"></a></h2>
</div>
<div class="section" id="external-interface-changes">
<h2><a class="toc-backref" href="#id36">External interface changes</a><a class="headerlink" href="#external-interface-changes" title="Permalink to this headline"></a></h2>
</div>
</div>


          </div>
        </div>
      </div>
      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
        <div class="sphinxsidebarwrapper">
  <h3><a href="index.html">Table Of Contents</a></h3>
  <ul>
<li><a class="reference internal" href="#">Ganeti 2.3 design</a><ul>
<li><a class="reference internal" href="#core-changes">Core changes</a><ul>
<li><a class="reference internal" href="#node-groups">Node Groups</a><ul>
<li><a class="reference internal" href="#current-state-and-shortcomings">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#proposed-changes">Proposed changes</a><ul>
<li><a class="reference internal" href="#node-group-management">Node group management</a></li>
<li><a class="reference internal" href="#node-group-attributes">Node group attributes</a></li>
<li><a class="reference internal" href="#node-group-operations">Node group operations</a></li>
<li><a class="reference internal" href="#instance-level-changes">Instance level changes</a></li>
<li><a class="reference internal" href="#internal-changes">Internal changes</a></li>
<li><a class="reference internal" href="#other-work-and-future-changes">Other work and future changes</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#scalability-issues-with-big-clusters">Scalability issues with big clusters</a><ul>
<li><a class="reference internal" href="#current-and-future-issues">Current and future issues</a></li>
<li><a class="reference internal" href="#cluster-state-cache">Cluster state cache</a><ul>
<li><a class="reference internal" href="#cache-structure">Cache structure</a></li>
<li><a class="reference internal" href="#cache-updates">Cache updates</a></li>
<li><a class="reference internal" href="#cache-invalidation">Cache invalidation</a></li>
<li><a class="reference internal" href="#cache-lifetime">Cache lifetime</a></li>
<li><a class="reference internal" href="#cache-usage">Cache usage</a></li>
</ul>
</li>
<li><a class="reference internal" href="#watcher-operation">Watcher operation</a><ul>
<li><a class="reference internal" href="#cost-of-periodic-cache-updating">Cost of periodic cache updating</a></li>
<li><a class="reference internal" href="#intra-node-group-scalability">Intra-node group scalability</a></li>
<li><a class="reference internal" href="#state-file-maintenance">State file maintenance</a></li>
</ul>
</li>
<li><a class="reference internal" href="#capacity-calculations">Capacity calculations</a><ul>
<li><a class="reference internal" href="#tspec-cluster-parameters">tspec cluster parameters</a></li>
<li><a class="reference internal" href="#opcode-details">OpCode details</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#node-flags">Node flags</a><ul>
<li><a class="reference internal" href="#id1">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#id2">Proposed changes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#job-priorities">Job priorities</a><ul>
<li><a class="reference internal" href="#id3">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#id4">Proposed changes</a><ul>
<li><a class="reference internal" href="#worker-pool">Worker pool</a></li>
<li><a class="reference internal" href="#locking">Locking</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#ipv6-support">IPv6 support</a><ul>
<li><a class="reference internal" href="#supported-ipv6-setup">Supported IPv6 setup</a></li>
<li><a class="reference internal" href="#netutils-utilities-for-handling-common-network-tasks">netutils: Utilities for handling common network tasks</a></li>
<li><a class="reference internal" href="#cluster-initialization">Cluster initialization</a></li>
<li><a class="reference internal" href="#node-addition">Node addition</a></li>
<li><a class="reference internal" href="#name-resolution">Name resolution</a></li>
<li><a class="reference internal" href="#ipv4-only-components">IPv4-only components</a></li>
</ul>
</li>
<li><a class="reference internal" href="#privilege-separation">Privilege Separation</a><ul>
<li><a class="reference internal" href="#id5">Current state and shortcomings</a></li>
<li><a class="reference internal" href="#implementation">Implementation</a></li>
<li><a class="reference internal" href="#security-domains">Security Domains</a></li>
<li><a class="reference internal" href="#restricted-commands">Restricted commands</a></li>
<li><a class="reference internal" href="#directory-structure-and-permissions">Directory structure and permissions</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#feature-changes">Feature changes</a></li>
<li><a class="reference internal" href="#external-interface-changes">External interface changes</a></li>
</ul>
</li>
</ul>

  <h4>Previous topic</h4>
  <p class="topless"><a href="design-2.2.html"
                        title="previous chapter">Ganeti 2.2 design</a></p>
  <h4>Next topic</h4>
  <p class="topless"><a href="design-htools-2.3.html"
                        title="next chapter">Synchronising htools to Ganeti 2.3</a></p>
  <div role="note" aria-label="source link">
    <h3>This Page</h3>
    <ul class="this-page-menu">
      <li><a href="_sources/design-2.3.rst.txt"
            rel="nofollow">Show Source</a></li>
    </ul>
   </div>
<div id="searchbox" style="display: none" role="search">
  <h3>Quick search</h3>
    <form class="search" action="search.html" method="get">
      <div><input type="text" name="q" /></div>
      <div><input type="submit" value="Go" /></div>
      <input type="hidden" name="check_keywords" value="yes" />
      <input type="hidden" name="area" value="default" />
    </form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
        </div>
      </div>
      <div class="clearer"></div>
    </div>
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <li class="right" style="margin-right: 10px">
          <a href="design-htools-2.3.html" title="Synchronising htools to Ganeti 2.3"
             >next</a></li>
        <li class="right" >
          <a href="design-2.2.html" title="Ganeti 2.2 design"
             >previous</a> |</li>
        <li class="nav-item nav-item-0"><a href="index.html">Ganeti 2.16.0~rc2 documentation</a> &#187;</li> 
      </ul>
    </div>
    <div class="footer" role="contentinfo">
        &#169; Copyright 2018, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Google Inc..
      Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.7.
    </div>
  </body>
</html>