 from google.cloud import monitoring
 
 
-
 def get_cpu_load():
     """Returns the most recent Cloud Bigtable CPU load measurement.
@@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
         bigtable_cluster (str): Cloud Bigtable cluster ID to scale
         scale_up (bool): If true, scale up, otherwise scale down
     """
-    _MIN_NODE_COUNT = 3
-    """
-    The minimum number of nodes to use. The default minimum is 3. If you have a
-    lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
-    clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
-    metric is useful in figuring out the minimum number of nodes.
-    """
 
-    _MAX_NODE_COUNT = 30
-    """
-    The maximum number of nodes to use. The default maximum is 30 nodes per zone.
-    If you need more quota, you can request more by following the instructions
-    <a href="https://cloud.google.com/bigtable/quota">here</a>.
-    """
+    # The minimum number of nodes to use. The default minimum is 3. If you have
+    # a lot of data, the rule of thumb is to not go below 2.5 TB per node for
+    # SSD clusters, and 8 TB for HDD. The
+    # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
+    # out the minimum number of nodes.
+    min_node_count = 3
+
+    # The maximum number of nodes to use. The default maximum is 30 nodes per
+    # zone. If you need more quota, you can request more by following the
+    # instructions at https://cloud.google.com/bigtable/quota.
+    max_node_count = 30
+
+    # The number of nodes to change the cluster by.
+    size_change_step = 3
 
-    _SIZE_CHANGE_STEP = 3
-    """The number of nodes to change the cluster by."""
     # [START bigtable_scale]
     bigtable_client = bigtable.Client(admin=True)
     instance = bigtable_client.instance(bigtable_instance)
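As a rough illustration of the sizing rule in the comment above (a sketch using
the 2.5 TB per SSD node and 8 TB per HDD node figures; the helper name and the
12 TB example are assumptions, not part of this change):

    import math

    # Hypothetical helper: derive a storage-based floor on node count from
    # the bigtable.googleapis.com/disk/bytes_used metric, per the rule of
    # thumb above, never going below the default minimum of 3 nodes.
    def min_nodes_for_storage(bytes_used, ssd=True):
        tb_used = bytes_used / 1e12
        tb_per_node = 2.5 if ssd else 8.0
        return max(3, math.ceil(tb_used / tb_per_node))

    # Example: 12 TB on an SSD cluster -> ceil(12 / 2.5) = 5 nodes minimum.
    print(min_nodes_for_storage(12e12))  # 5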
@@ -79,16 +77,17 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
     current_node_count = cluster.serve_nodes
 
     if scale_up:
-        if current_node_count < _MAX_NODE_COUNT:
-            new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
+        if current_node_count < max_node_count:
+            new_node_count = min(
+                current_node_count + size_change_step, max_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled up from {} to {} nodes.'.format(
                 current_node_count, new_node_count))
     else:
-        if current_node_count > _MIN_NODE_COUNT:
+        if current_node_count > min_node_count:
             new_node_count = max(
-                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
+                current_node_count - size_change_step, min_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled down from {} to {} nodes.'.format(
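For context, a minimal sketch of how get_cpu_load() and scale_bigtable() might
be wired together in a polling loop (the watermark values, the sleep interval,
and the main() signature are illustrative assumptions, not part of this change):

    import time

    # Assumed thresholds: scale up when CPU load rises above the high
    # watermark, scale down when it falls below the low watermark.
    HIGH_CPU_WATERMARK = 0.6
    LOW_CPU_WATERMARK = 0.2

    def main(bigtable_instance, bigtable_cluster, poll_interval=60):
        """Periodically check CPU load and resize the cluster."""
        while True:
            cluster_cpu = get_cpu_load()
            if cluster_cpu > HIGH_CPU_WATERMARK:
                scale_bigtable(bigtable_instance, bigtable_cluster, True)
            elif cluster_cpu < LOW_CPU_WATERMARK:
                scale_bigtable(bigtable_instance, bigtable_cluster, False)
            time.sleep(poll_interval)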