Sure, no problem. It’s pretty bare-bones:
/// Single-pass accumulator for descriptive statistics (count, min, max,
/// mean, unbiased sample variance) using Welford's online algorithm.
struct Statistics<T: FloatingPoint> {
    // Running mean of all values folded in so far.
    private var runningMean: T = 0
    // Running sum of squared deviations from the mean (Welford's M2 term).
    private var sumSqDeviations: T = 0
    /// Number of values accumulated so far.
    private(set) var count: T = 0
    /// Smallest value seen; stays `+infinity` until the first value arrives.
    private(set) var min: T = +.infinity
    /// Largest value seen; stays `-infinity` until the first value arrives.
    private(set) var max: T = -.infinity

    /// Arithmetic mean of the accumulated values, or `.nan` when empty.
    var average: T { count > 0 ? runningMean : .nan }

    /// Unbiased sample variance (Bessel's correction: divides by `count - 1`),
    /// or `.nan` when fewer than two values have been accumulated.
    var variance: T { count > 1 ? sumSqDeviations / (count - 1) : .nan }

    /// Sample standard deviation, or `.nan` when `variance` is `.nan`.
    var standardDeviation: T { variance.squareRoot() }

    /// Creates an empty accumulator.
    init() {}

    /// Creates an accumulator pre-loaded with `values`.
    init<S: Sequence>(_ values: S) where S.Element == T {
        addValues(values)
    }

    /// Folds every element of `values` into the running statistics.
    mutating func addValues<S: Sequence>(_ values: S) where S.Element == T {
        for element in values {
            addValue(element)
        }
    }

    /// Folds a single `value` into the running statistics (Welford update).
    mutating func addValue(_ value: T) {
        // `Swift.` qualifier needed: the `min`/`max` properties shadow the
        // global functions inside the type (SR-2450).
        min = Swift.min(min, value)
        max = Swift.max(max, value)
        count += 1
        let delta = value - runningMean        // deviation from the OLD mean
        let correction = delta / count
        runningMean += correction
        // Equivalent to delta * (value - newMean): numerically stable update.
        sumSqDeviations += delta * (delta - correction)
    }
}
For my use-cases, I needed unbiased sample variance, hence Bessel’s correction. Also, there exists an error-compensating version of the “addValue” algorithm, but I didn’t need it so I went with the simple approach.
Tangentially, notice that I had to write “Swift.min()”, because of SR-2450: inside the type, the `min`/`max` properties shadow the global `min`/`max` functions.