Suppose I have this extension on FixedWidthInteger:
public extension FixedWidthInteger {
    /// Renders `self` as a binary-literal-style string, one 8-bit group per
    /// byte, most significant byte first, groups joined with underscores —
    /// e.g. `Int16(5)` → `"0b00000000_00000101"`.
    ///
    /// Bytes are extracted least-significant first via shifting, then the
    /// sequence is reversed so the output reads most-significant first.
    func binaryString() -> String {
        print("Using Non-specialized")
        let byteCount = Self.bitWidth / 8
        let byteGroups = (0..<byteCount).map { shift -> String in
            let byte = UInt8(truncatingIfNeeded: self >> (shift * 8))
            let bits = String(byte, radix: 2)
            // Left-pad each byte to a fixed width of 8 binary digits.
            return String(repeating: "0", count: 8 - bits.count) + bits
        }
        return "0b" + byteGroups.reversed().joined(separator: "_")
    }
}
But I optimize the UInt8 case as follows:
public extension UInt8 {
    /// Precomputed binary-literal strings for every byte value, indexed by the
    /// byte itself: index 0 → "0b00000000", …, index 255 → "0b11111111".
    ///
    /// Built programmatically rather than as a hand-written 256-entry literal:
    /// the original elided literal ("// etc etc") held only 4 entries, so
    /// lookups for values ≥ 4 trapped with an index-out-of-range error and
    /// values 2 and 3 returned the wrong strings.
    static let allBinaryStringsForByte: [String] = (0...255).map { value -> String in
        let bits = String(value, radix: 2)
        // Left-pad to a fixed 8-digit field so every entry is the same width.
        return "0b" + String(repeating: "0", count: 8 - bits.count) + bits
    }

    /// O(1) table-lookup specialization of `binaryString()` for single bytes.
    func binaryString() -> String {
        print("Using UInt8 specialized")
        return UInt8.allBinaryStringsForByte[Int(self)]
    }
}
The following both call the correct version of binaryString as expected:
_ = UInt8(1).binaryString()
_ = Int(1).binaryString()
// -> Using UInt8 specialized
// -> Using Non-specialized
However, if I create a generic function such as:
/// Demonstration wrapper: calls `binaryString()` on `value` through a generic
/// `FixedWidthInteger` constraint and discards the result.
func someFunc<Value: FixedWidthInteger>(_ value: Value) {
    // NOTE(review): `binaryString()` exists only in extensions, not as a
    // protocol requirement, so this call is resolved statically against the
    // FixedWidthInteger extension — consistent with the "Using Non-specialized"
    // output shown for both calls below. Confirm against the final answer.
    let representation = value.binaryString()
    _ = representation
}
then the following does not work as I expected:
someFunc(UInt8(1))
someFunc(Int(1))
// -> Using Non-specialized
// -> Using Non-specialized
What am I overlooking? Surely the compiler knows the type of the FWI value passed into someFunc, so why does it choose the 'wrong' one in the UInt8 case?
(P.S. I'm not really trying to come up with an efficient binaryString() function.)