{-| Module describing a node.

All updates are functional (copy-based) and return a new node with
updated value.
-}

{-

Copyright (C) 2009, 2010, 2011, 2012, 2013 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.Node
  ( Node(..)
  , List
  -- * Constructor
  , create
  -- ** Finalization after data loading
  , buildPeers
  , setIdx
  , setAlias
  , setOffline
  , setXmem
  , setFmem
  , setPri
  , setSec
  , setMdsk
  , setMcpu
  , setPolicy
  -- * Tag maps
  , addTags
  , delTags
  , rejectAddTags
  -- * Instance (re)location
  , removePri
  , removeSec
  , addPri
  , addPriEx
  , addSec
  , addSecEx
  -- * Stats
  , availDisk
  , availMem
  , availCpu
  , iMem
  , iDsk
  , conflictingPrimaries
  -- * Formatting
  , defaultFields
  , showHeader
  , showField
  , list
  -- * Misc stuff
  , AssocList
  , AllocElement
  , noSecondary
  , computeGroups
  , mkNodeGraph
  ) where

import Control.Monad (liftM, liftM2)
import qualified Data.Foldable as Foldable
import Data.Function (on)
import qualified Data.Graph as Graph
import qualified Data.IntMap as IntMap
import Data.List hiding (group)
import qualified Data.Map as Map
import Data.Ord (comparing)
import Text.Printf (printf)

import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.PeerMap as P

import Ganeti.BasicTypes
import qualified Ganeti.HTools.Types as T

-- * Type declarations

-- | The tag map type.
type TagMap = Map.Map String Int

-- | The node type.
data Node = Node
  { name         :: String    -- ^ The node name
  , alias        :: String    -- ^ The shortened name (for display purposes)
  , tMem         :: Double    -- ^ Total memory (MiB)
  , nMem         :: Int       -- ^ Node memory (MiB)
  , fMem         :: Int       -- ^ Free memory (MiB)
  , xMem         :: Int       -- ^ Unaccounted memory (MiB)
  , tDsk         :: Double    -- ^ Total disk space (MiB)
  , fDsk         :: Int       -- ^ Free disk space (MiB)
  , tCpu         :: Double    -- ^ Total CPU count
  , uCpu         :: Int       -- ^ Used VCPU count
  , spindleCount :: Int       -- ^ Node spindles (spindle_count node parameter)
  , pList        :: [T.Idx]   -- ^ List of primary instance indices
  , sList        :: [T.Idx]   -- ^ List of secondary instance indices
  , idx          :: T.Ndx     -- ^ Internal index for book-keeping
  , peers        :: P.PeerMap -- ^ Pnode to instance mapping
  , failN1       :: Bool      -- ^ Whether the node has failed n1
  , rMem         :: Int       -- ^ Maximum memory needed for failover by
                              -- primaries of this node
  , pMem         :: Double    -- ^ Percent of free memory
  , pDsk         :: Double    -- ^ Percent of free disk
  , pRem         :: Double    -- ^ Percent of reserved memory
  , pCpu         :: Double    -- ^ Ratio of virtual to physical CPUs
  , mDsk         :: Double    -- ^ Minimum free disk ratio
  , loDsk        :: Int       -- ^ Autocomputed from mDsk low disk
                              -- threshold
  , hiCpu        :: Int       -- ^ Autocomputed from mCpu high cpu
                              -- threshold
  , hiSpindles   :: Double    -- ^ Auto-computed from policy spindle_ratio
                              -- and the node spindle count
  , instSpindles :: Double    -- ^ Spindles used by instances
  , offline      :: Bool      -- ^ Whether the node should not be used for
                              -- allocations and skipped from score
                              -- computations
  , utilPool     :: T.DynUtil -- ^ Total utilisation capacity
  , utilLoad     :: T.DynUtil -- ^ Sum of instance utilisation
  , pTags        :: TagMap    -- ^ Primary instance exclusion tags and their count
  , group        :: T.Gdx     -- ^ The node's group (index)
  , iPolicy      :: T.IPolicy -- ^ The instance policy (of the node's group)
  } deriving (Show, Eq)

instance T.Element Node where
  nameOf = name
  idxOf = idx
  setAlias = setAlias
  setIdx = setIdx
  allNames n = [name n, alias n]

-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]

-- | A simple name for a node map.
type List = Container.Container Node

-- | A simple name for an allocation element (here just for logistic
-- reasons).
type AllocElement = (List, Instance.Instance, [Node], T.Score)

-- | Constant node index for a non-moveable instance.
noSecondary :: T.Ndx
noSecondary = -1

-- * Helper functions

-- | Add a tag to a tagmap.
addTag :: TagMap -> String -> TagMap
addTag t s = Map.insertWith (+) s 1 t

-- | Add multiple tags.
addTags :: TagMap -> [String] -> TagMap
addTags = foldl' addTag

-- | Adjust or delete a tag from a tagmap.
delTag :: TagMap -> String -> TagMap
delTag t s = Map.update (\v -> if v > 1
                                 then Just (v-1)
                                 else Nothing)
             s t

-- | Remove multiple tags.
delTags :: TagMap -> [String] -> TagMap
delTags = foldl' delTag

-- | Check if we can add a list of tags to a tagmap.
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags t = any (`Map.member` t)
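
-- Illustrative examples (added for clarity, not part of the original
-- module); with the module loaded in GHCi one would expect:
--
-- >>> addTags Map.empty ["exports", "exports", "db"]
-- fromList [("db",1),("exports",2)]
-- >>> delTags (addTags Map.empty ["exports", "exports", "db"]) ["exports"]
-- fromList [("db",1),("exports",1)]
-- >>> rejectAddTags (addTags Map.empty ["db"]) ["exports", "db"]
-- True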

-- | Check how many primary instances have conflicting tags. The
-- algorithm to compute this is to sum the count of all tags, then
-- subtract the size of the tag map (since each tag has at least one,
-- non-conflicting instance); this is equivalent to summing the
-- values in the tag map minus one.
conflictingPrimaries :: Node -> Int
conflictingPrimaries (Node { pTags = t }) = Foldable.sum t - Map.size t
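
-- Worked example (added): for a tag map @fromList [("db",3),("mail",1)]@
-- the result is (3 + 1) - 2 = 2, i.e. the two "db" primaries beyond the
-- first one are counted as conflicts.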

-- | Helper function to increment a base value depending on the passed
-- boolean argument.
incIf :: (Num a) => Bool -> a -> a -> a
incIf True base delta = base + delta
incIf False base _ = base

-- | Helper function to decrement a base value depending on the passed
-- boolean argument.
decIf :: (Num a) => Bool -> a -> a -> a
decIf True base delta = base - delta
decIf False base _ = base

-- * Initialization functions

-- | Create a new node.
--
-- The index and the peers maps are empty, and will need to be
-- updated later via the 'setIdx' and 'buildPeers' functions.
create :: String -> Double -> Int -> Int -> Double
       -> Int -> Double -> Bool -> Int -> T.Gdx -> Node
create name_init mem_t_init mem_n_init mem_f_init
       dsk_t_init dsk_f_init cpu_t_init offline_init spindles_init
       group_init =
  Node { name = name_init
       , alias = name_init
       , tMem = mem_t_init
       , nMem = mem_n_init
       , fMem = mem_f_init
       , tDsk = dsk_t_init
       , fDsk = dsk_f_init
       , tCpu = cpu_t_init
       , spindleCount = spindles_init
       , uCpu = 0
       , pList = []
       , sList = []
       , failN1 = True
       , idx = -1
       , peers = P.empty
       , rMem = 0
       , pMem = fromIntegral mem_f_init / mem_t_init
       , pDsk = computePDsk dsk_f_init dsk_t_init
       , pRem = 0
       , pCpu = 0
       , offline = offline_init
       , xMem = 0
       , mDsk = T.defReservedDiskRatio
       , loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio T.defIPolicy) cpu_t_init
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio T.defIPolicy)
                      spindles_init
       , instSpindles = 0
       , utilPool = T.baseUtil
       , utilLoad = T.zeroUtil
       , pTags = Map.empty
       , group = group_init
       , iPolicy = T.defIPolicy
       }

-- | Conversion formula from mDsk\/tDsk to loDsk.
mDskToloDsk :: Double -> Double -> Int
mDskToloDsk mval = floor . (mval *)

-- | Conversion formula from mCpu\/tCpu to hiCpu.
mCpuTohiCpu :: Double -> Double -> Int
mCpuTohiCpu mval = floor . (mval *)

-- | Conversion formula from spindles and spindle ratio to hiSpindles.
computeHiSpindles :: Double -> Int -> Double
computeHiSpindles spindle_ratio = (spindle_ratio *) . fromIntegral
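
-- Illustrative values (added, not in the original source), assuming disk
-- sizes in MiB:
--
-- >>> mDskToloDsk 0.05 204800
-- 10240
-- >>> mCpuTohiCpu 4.0 16.0
-- 64
-- >>> computeHiSpindles 32.0 12
-- 384.0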

-- | Changes the index.
--
-- This is used only during the building of the data structures.
setIdx :: Node -> T.Ndx -> Node
setIdx t i = t {idx = i}

-- | Changes the alias.
--
-- This is used only during the building of the data structures.
setAlias :: Node -> String -> Node
setAlias t s = t { alias = s }

-- | Sets the offline attribute.
setOffline :: Node -> Bool -> Node
setOffline t val = t { offline = val }

-- | Sets the unaccounted memory.
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }

-- | Sets the max disk usage ratio.
setMdsk :: Node -> Double -> Node
setMdsk t val = t { mDsk = val, loDsk = mDskToloDsk val (tDsk t) }

-- | Sets the max cpu usage ratio. This will update the node's
-- ipolicy, losing sharing (but this should be a rarely done operation).
setMcpu :: Node -> Double -> Node
setMcpu t val =
  let new_ipol = (iPolicy t) { T.iPolicyVcpuRatio = val }
  in t { hiCpu = mCpuTohiCpu val (tCpu t), iPolicy = new_ipol }

-- | Sets the policy.
setPolicy :: T.IPolicy -> Node -> Node
setPolicy pol node =
  node { iPolicy = pol
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio pol) (tCpu node)
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio pol)
                      (spindleCount node)
       }

-- | Computes the maximum reserved memory for peers from a peer map.
computeMaxRes :: P.PeerMap -> P.Elem
computeMaxRes = P.maxElem

-- | Builds the peer map for a given node.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
  let mdata = map
              (\i_idx -> let inst = Container.find i_idx il
                             mem = if Instance.usesSecMem inst
                                     then Instance.mem inst
                                     else 0
                         in (Instance.pNode inst, mem))
              (sList t)
      pmap = P.accumArray (+) mdata
      new_rmem = computeMaxRes pmap
      new_failN1 = fMem t <= new_rmem
      new_prem = fromIntegral new_rmem / tMem t
  in t {peers = pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
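
-- Illustrative example (added): if a node holds the secondaries of three
-- DRBD instances whose primaries are node 3 (1024 MiB) and node 5 (2048
-- and 512 MiB), the peer map accumulates to [(3, 1024), (5, 2560)], rMem
-- becomes 2560, and failN1 is set when the node's free memory does not
-- exceed that reservation.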

-- | Calculate the new spindle usage.
calcSpindleUse :: Node -> Instance.Instance -> Double
calcSpindleUse n i = incIf (Instance.usesLocalStorage i) (instSpindles n)
                       (fromIntegral $ Instance.spindleUse i)

-- | Assigns an instance to a node as primary and updates the used VCPU
-- count, utilisation data and tags map.
setPri :: Node -> Instance.Instance -> Node
setPri t inst = t { pList = Instance.idx inst:pList t
                  , uCpu = new_count
                  , pCpu = fromIntegral new_count / tCpu t
                  , utilLoad = utilLoad t `T.addUtil` Instance.util inst
                  , pTags = addTags (pTags t) (Instance.exclTags inst)
                  , instSpindles = calcSpindleUse t inst
                  }
  where new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst)
                    (uCpu t)

-- | Assigns an instance to a node as secondary without other updates.
setSec :: Node -> Instance.Instance -> Node
setSec t inst = t { sList = Instance.idx inst:sList t
                  , utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
                  , instSpindles = calcSpindleUse t inst
                  }
  where old_load = utilLoad t

-- | Computes the new 'pDsk' value, handling nodes without local disk
-- storage (we consider all their disk used).
computePDsk :: Int -> Double -> Double
computePDsk _ 0 = 1
computePDsk used total = fromIntegral used / total
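
-- Illustrative values (added): @computePDsk 51200 204800@ yields 0.25,
-- while @computePDsk 0 0@ yields 1.0 for a node without local disk.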

-- * Update functions

-- | Sets the free memory.
setFmem :: Node -> Int -> Node
setFmem t new_mem =
  let new_n1 = new_mem < rMem t
      new_mp = fromIntegral new_mem / tMem t
  in t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }

-- | Removes a primary instance.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      new_plist = delete iname (pList t)
      new_mem = incIf i_online (fMem t) (Instance.mem inst)
      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
      new_spindles = decIf uses_disk (instSpindles t) 1
      new_mp = fromIntegral new_mem / tMem t
      new_dp = computePDsk new_dsk (tDsk t)
      new_failn1 = new_mem <= rMem t
      new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
      new_rcpu = fromIntegral new_ucpu / tCpu t
      new_load = utilLoad t `T.subUtil` Instance.util inst
  in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
       , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
       , pTags = delTags (pTags t) (Instance.exclTags inst)
       , instSpindles = new_spindles
       }

-- | Removes a secondary instance.
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
  let iname = Instance.idx inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      pnode = Instance.pNode inst
      new_slist = delete iname (sList t)
      new_dsk = incIf uses_disk cur_dsk (Instance.dsk inst)
      new_spindles = decIf uses_disk (instSpindles t) 1
      old_peers = peers t
      old_peem = P.find pnode old_peers
      new_peem = decIf (Instance.usesSecMem inst) old_peem (Instance.mem inst)
      new_peers = if new_peem > 0
                    then P.add pnode new_peem old_peers
                    else P.remove pnode old_peers
      old_rmem = rMem t
      new_rmem = if old_peem < old_rmem
                   then old_rmem
                   else computeMaxRes new_peers
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = fMem t <= new_rmem
      new_dp = computePDsk new_dsk (tDsk t)
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load -
                            T.dskWeight (Instance.util inst) }
  in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
       , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
       , pRem = new_prem, utilLoad = new_load
       , instSpindles = new_spindles
       }

-- | Adds a primary instance (basic version).
addPri :: Node -> Instance.Instance -> T.OpResult Node
addPri = addPriEx False

-- | Adds a primary instance (extended version).
addPriEx :: Bool               -- ^ Whether to override the N+1 and
                               -- other /soft/ checks, useful if we
                               -- come from a worse status
                               -- (e.g. offline)
         -> Node               -- ^ The target node
         -> Instance.Instance  -- ^ The instance to add
         -> T.OpResult Node    -- ^ The result of the operation,
                               -- either the new version of the node
                               -- or a failure mode
addPriEx force t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      new_mem = decIf i_online (fMem t) (Instance.mem inst)
      new_dsk = decIf uses_disk cur_dsk (Instance.dsk inst)
      new_spindles = incIf uses_disk (instSpindles t) 1
      new_failn1 = new_mem <= rMem t
      new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
      new_pcpu = fromIntegral new_ucpu / tCpu t
      new_dp = computePDsk new_dsk (tDsk t)
      l_cpu = T.iPolicyVcpuRatio $ iPolicy t
      new_load = utilLoad t `T.addUtil` Instance.util inst
      inst_tags = Instance.exclTags inst
      old_tags = pTags t
      strict = not force
  in case () of
       _ | new_mem <= 0 -> Bad T.FailMem
         | uses_disk && new_dsk <= 0 -> Bad T.FailDisk
         | uses_disk && mDsk t > new_dp && strict -> Bad T.FailDisk
         | uses_disk && new_spindles > hiSpindles t
           && strict -> Bad T.FailDisk
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | l_cpu >= 0 && l_cpu < new_pcpu && strict -> Bad T.FailCPU
         | rejectAddTags old_tags inst_tags -> Bad T.FailTags
         | otherwise ->
           let new_plist = iname:pList t
               new_mp = fromIntegral new_mem / tMem t
               r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
                     , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
                     , uCpu = new_ucpu, pCpu = new_pcpu
                     , utilLoad = new_load
                     , pTags = addTags old_tags inst_tags
                     , instSpindles = new_spindles
                     }
           in Ok r
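
-- Illustrative usage sketch (added; @node@, @inst@ and @handle@ are just
-- placeholders, not names from this code base): callers typically
-- pattern-match on the returned 'T.OpResult':
--
-- > case addPri node inst of
-- >   Ok node'  -> node'          -- allocation accepted, use updated node
-- >   Bad fmode -> handle fmode   -- e.g. T.FailMem, T.FailDisk, T.FailCPU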

-- | Adds a secondary instance (basic version).
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSec = addSecEx False

-- | Adds a secondary instance (extended version).
addSecEx :: Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSecEx force t inst pdx =
  let iname = Instance.idx inst
      old_peers = peers t
      old_mem = fMem t
      new_dsk = fDsk t - Instance.dsk inst
      new_spindles = instSpindles t + 1
      secondary_needed_mem = if Instance.usesSecMem inst
                               then Instance.mem inst
                               else 0
      new_peem = P.find pdx old_peers + secondary_needed_mem
      new_peers = P.add pdx new_peem old_peers
      new_rmem = max (rMem t) new_peem
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = old_mem <= new_rmem
      new_dp = computePDsk new_dsk (tDsk t)
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load +
                            T.dskWeight (Instance.util inst) }
      strict = not force
  in case () of
       _ | not (Instance.hasSecondary inst) -> Bad T.FailDisk
         | new_dsk <= 0 -> Bad T.FailDisk
         | mDsk t > new_dp && strict -> Bad T.FailDisk
         | new_spindles > hiSpindles t && strict -> Bad T.FailDisk
         | secondary_needed_mem >= old_mem && strict -> Bad T.FailMem
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | otherwise ->
           let new_slist = iname:sList t
               r = t { sList = new_slist, fDsk = new_dsk
                     , peers = new_peers, failN1 = new_failn1
                     , rMem = new_rmem, pDsk = new_dp
                     , pRem = new_prem, utilLoad = new_load
                     , instSpindles = new_spindles
                     }
           in Ok r

-- * Stats functions

-- | Computes the amount of available disk on a given node.
availDisk :: Node -> Int
availDisk t =
  let _f = fDsk t
      _l = loDsk t
  in if _f < _l
       then 0
       else _f - _l

-- | Computes the amount of used disk on a given node.
iDsk :: Node -> Int
iDsk t = truncate (tDsk t) - fDsk t

-- | Computes the amount of available memory on a given node.
availMem :: Node -> Int
availMem t =
  let _f = fMem t
      _l = rMem t
  in if _f < _l
       then 0
       else _f - _l

-- | Computes the amount of available CPU capacity on a given node.
availCpu :: Node -> Int
availCpu t =
  let _u = uCpu t
      _l = hiCpu t
  in if _l >= _u
       then _l - _u
       else 0

-- | The memory used by instances on a given node.
iMem :: Node -> Int
iMem t = truncate (tMem t) - nMem t - xMem t - fMem t
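
-- Worked example (added): a node with tMem = 32768, nMem = 1024,
-- xMem = 512 and fMem = 16384 reports iMem = 32768 - 1024 - 512 - 16384
-- = 14848 MiB used by instances.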

-- * Node graph functions
-- These functions do the transformations needed so that nodes can be
-- represented as a graph connected by the instances that are replicated
-- on them.

-- * Making of a Graph from a node/instance list

-- | Transform an instance into a list of edges on the node graph.
instanceToEdges :: Instance.Instance -> [Graph.Edge]
instanceToEdges i
  | Instance.hasSecondary i = [(pnode,snode), (snode,pnode)]
  | otherwise = []
  where pnode = Instance.pNode i
        snode = Instance.sNode i

-- | Transform the list of instances into a list of destination edges.
instancesToEdges :: Instance.List -> [Graph.Edge]
instancesToEdges = concatMap instanceToEdges . Container.elems

-- | Transform the list of nodes into vertices bounds.
-- Returns Nothing if the list is empty.
nodesToBounds :: List -> Maybe Graph.Bounds
nodesToBounds nl = liftM2 (,) nmin nmax
  where nmin = fmap (fst . fst) (IntMap.minViewWithKey nl)
        nmax = fmap (fst . fst) (IntMap.maxViewWithKey nl)

-- | Transform a Node + Instance list into a NodeGraph type.
-- Returns Nothing if the node list is empty.
mkNodeGraph :: List -> Instance.List -> Maybe Graph.Graph
mkNodeGraph nl il =
  liftM (`Graph.buildG` instancesToEdges il) (nodesToBounds nl)
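
-- Illustrative example (added): for a node list with indices 0..2 and a
-- single DRBD instance having pNode = 0 and sNode = 2, the resulting
-- graph has vertex bounds (0, 2) and the edge 0-2 in both directions;
-- nodes are vertices, replicated instances are the (undirected) edges.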

-- * Display functions

-- | Return a field for a given node.
showField :: Node   -- ^ Node which we're querying
          -> String -- ^ Field name
          -> String -- ^ Field value as string
showField t field =
  case field of
    "idx" -> printf "%4d" $ idx t
    "name" -> alias t
    "fqdn" -> name t
    "status" -> case () of
                  _ | offline t -> "-"
                    | failN1 t -> "*"
                    | otherwise -> " "
    "tmem" -> printf "%5.0f" $ tMem t
    "nmem" -> printf "%5d" $ nMem t
    "xmem" -> printf "%5d" $ xMem t
    "fmem" -> printf "%5d" $ fMem t
    "imem" -> printf "%5d" $ iMem t
    "rmem" -> printf "%5d" $ rMem t
    "amem" -> printf "%5d" $ fMem t - rMem t
    "tdsk" -> printf "%5.0f" $ tDsk t / 1024
    "fdsk" -> printf "%5d" $ fDsk t `div` 1024
    "tcpu" -> printf "%4.0f" $ tCpu t
    "ucpu" -> printf "%4d" $ uCpu t
    "pcnt" -> printf "%3d" $ length (pList t)
    "scnt" -> printf "%3d" $ length (sList t)
    "plist" -> show $ pList t
    "slist" -> show $ sList t
    "pfmem" -> printf "%6.4f" $ pMem t
    "pfdsk" -> printf "%6.4f" $ pDsk t
    "rcpu" -> printf "%5.2f" $ pCpu t
    "cload" -> printf "%5.3f" uC
    "mload" -> printf "%5.3f" uM
    "dload" -> printf "%5.3f" uD
    "nload" -> printf "%5.3f" uN
    "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
               Map.toList $ pTags t
    "peermap" -> show $ peers t
    "spindle_count" -> show $ spindleCount t
    "hi_spindles" -> show $ hiSpindles t
    "inst_spindles" -> show $ instSpindles t
    _ -> T.unknownField
  where
    T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
                T.dskWeight = uD, T.netWeight = uN } = utilLoad t

-- | Returns the header and numeric property of a field.
showHeader :: String -> (String, Bool)
showHeader field =
  case field of
    "idx" -> ("Index", True)
    "name" -> ("Name", False)
    "fqdn" -> ("Name", False)
    "status" -> ("F", False)
    "tmem" -> ("t_mem", True)
    "nmem" -> ("n_mem", True)
    "xmem" -> ("x_mem", True)
    "fmem" -> ("f_mem", True)
    "imem" -> ("i_mem", True)
    "rmem" -> ("r_mem", True)
    "amem" -> ("a_mem", True)
    "tdsk" -> ("t_dsk", True)
    "fdsk" -> ("f_dsk", True)
    "tcpu" -> ("pcpu", True)
    "ucpu" -> ("vcpu", True)
    "pcnt" -> ("pcnt", True)
    "scnt" -> ("scnt", True)
    "plist" -> ("primaries", True)
    "slist" -> ("secondaries", True)
    "pfmem" -> ("p_fmem", True)
    "pfdsk" -> ("p_fdsk", True)
    "rcpu" -> ("r_cpu", True)
    "cload" -> ("lCpu", True)
    "mload" -> ("lMem", True)
    "dload" -> ("lDsk", True)
    "nload" -> ("lNet", True)
    "ptags" -> ("PrimaryTags", False)
    "peermap" -> ("PeerMap", False)
    "spindle_count" -> ("NodeSpindles", True)
    "hi_spindles" -> ("MaxSpindles", True)
    "inst_spindles" -> ("InstSpindles", True)
    -- TODO: add node fields (group.uuid, group)
    _ -> (T.unknownField, False)

-- | String converter for the node list functionality.
list :: [String] -> Node -> [String]
list fields t = map (showField t) fields

-- | Constant holding the fields we're displaying by default.
defaultFields :: [String]
defaultFields =
  [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
  , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
  , "pfmem", "pfdsk", "rcpu"
  , "cload", "mload", "dload", "nload" ]

{-# ANN computeGroups "HLint: ignore Use alternative" #-}
-- | Split a list of nodes into a list of (node group index, list of
-- associated nodes).
computeGroups :: [Node] -> [(T.Gdx, [Node])]
computeGroups nodes =
  let nodes' = sortBy (comparing group) nodes
      nodes'' = groupBy ((==) `on` group) nodes'
      -- use of head here is OK, since groupBy returns non-empty lists; if
      -- you remove groupBy, also remove use of head
  in map (\nl -> (group (head nl), nl)) nodes''
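
-- Worked example (added): for nodes n1, n2 and n3 with group indices
-- 1, 0 and 1 respectively, the result is [(0, [n2]), (1, [n1, n3])].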