To read and write hardware registers on our platform, I've added two special functions to our standard library...
/// Store `value` directly to the given raw memory address.
@_transparent
@_effects(readwrite)
public func _rawPointerWrite<Pointee>(address: Int, value: Pointee) {
    Builtin.assign(value, UnsafeMutablePointer<Pointee>(knownNotNilBitPattern: address)._rawValue)
}

/// Load a `Pointee` directly from the given raw memory address.
@_transparent
@_effects(readwrite)
public func _rawPointerRead<Pointee>(address: Int) -> Pointee {
    Builtin.loadRaw(UnsafeMutablePointer<Pointee>(knownNotNilBitPattern: address)._rawValue)
}
...which we don't intend clients to use in their code, but we will use in some of our low-level hardware libraries.
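To give a sense of how our hardware libraries use these, here is a rough sketch of the kind of wrapper we build on top (the register address and names here are hypothetical, purely for illustration, not our real API):

// Hypothetical example only: a made-up GPIO port register address.
let portRegisterAddress = 0x25

// Set or clear a single pin by read-modify-writing the port register.
func setPin(_ pin: UInt8, high: Bool) {
    var bits: UInt8 = _rawPointerRead(address: portRegisterAddress)
    if high {
        bits |= 1 << pin
    } else {
        bits &= ~(1 << pin)
    }
    _rawPointerWrite(address: portRegisterAddress, value: bits)
}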
I'm trying to debug how certain SIL optimisations treat this code.
In short, if I write this code...
_rawPointerWrite(address: 0x25, value: 1)
_rawPointerWrite(address: 0x25, value: 0)
...which is something you might genuinely need to do in hardware, e.g. to send signals on a pin... then with -O Swift optimises this down to just the last write, which breaks our hardware libraries. Because I'm using Builtins, I had assumed these calls would be left alone rather than optimised like this. I want to work out which optimisation pass is doing this.
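Concretely, my reading of the canonical SIL further down is that under -O the two calls above are reduced to the equivalent of just this:

// The write of 1 is eliminated entirely; only the final write of 0 survives.
_rawPointerWrite(address: 0x25, value: 0)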
Here is the raw SIL produced...
sil_stage raw
import Builtin
import Swift
import SwiftShims
// main
sil [ossa] @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 {
bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>):
%2 = integer_literal $Builtin.IntLiteral, 37 // user: %5
%3 = metatype $@thin Int.Type // user: %5
// function_ref Int.init(_builtinIntegerLiteral:)
%4 = function_ref @$sSi22_builtinIntegerLiteralSiBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // user: %5
%5 = apply %4(%2, %3) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // user: %13
%6 = integer_literal $Builtin.IntLiteral, 1 // user: %9
%7 = metatype $@thin UInt8.Type // user: %9
// function_ref UInt8.init(_builtinIntegerLiteral:)
%8 = function_ref @$ss5UInt8V22_builtinIntegerLiteralABBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin UInt8.Type) -> UInt8 // user: %9
%9 = apply %8(%6, %7) : $@convention(method) (Builtin.IntLiteral, @thin UInt8.Type) -> UInt8 // user: %11
%10 = alloc_stack $UInt8 // users: %14, %13, %11
store %9 to [trivial] %10 : $*UInt8 // id: %11
// function_ref _rawPointerWrite<A>(address:value:)
%12 = function_ref @$ss16_rawPointerWrite7address5valueySi_xtlF : $@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0) -> () // user: %13
%13 = apply %12<UInt8>(%5, %10) : $@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0) -> ()
dealloc_stack %10 : $*UInt8 // id: %14
%15 = integer_literal $Builtin.IntLiteral, 37 // user: %18
%16 = metatype $@thin Int.Type // user: %18
// function_ref Int.init(_builtinIntegerLiteral:)
%17 = function_ref @$sSi22_builtinIntegerLiteralSiBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // user: %18
%18 = apply %17(%15, %16) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // user: %26
%19 = integer_literal $Builtin.IntLiteral, 0 // user: %22
%20 = metatype $@thin UInt8.Type // user: %22
// function_ref UInt8.init(_builtinIntegerLiteral:)
%21 = function_ref @$ss5UInt8V22_builtinIntegerLiteralABBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin UInt8.Type) -> UInt8 // user: %22
%22 = apply %21(%19, %20) : $@convention(method) (Builtin.IntLiteral, @thin UInt8.Type) -> UInt8 // user: %24
%23 = alloc_stack $UInt8 // users: %27, %26, %24
store %22 to [trivial] %23 : $*UInt8 // id: %24
// function_ref _rawPointerWrite<A>(address:value:)
%25 = function_ref @$ss16_rawPointerWrite7address5valueySi_xtlF : $@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0) -> () // user: %26
%26 = apply %25<UInt8>(%18, %23) : $@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0) -> ()
dealloc_stack %23 : $*UInt8 // id: %27
%28 = integer_literal $Builtin.Int32, 0 // user: %29
%29 = struct $Int32 (%28 : $Builtin.Int32) // user: %30
return %29 : $Int32 // id: %30
} // end sil function 'main'
// Int.init(_builtinIntegerLiteral:)
sil [transparent] [serialized] @$sSi22_builtinIntegerLiteralSiBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int
// UInt8.init(_builtinIntegerLiteral:)
sil [transparent] [serialized] @$ss5UInt8V22_builtinIntegerLiteralABBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin UInt8.Type) -> UInt8
// _rawPointerWrite<A>(address:value:)
sil [transparent] [serialized] @$ss16_rawPointerWrite7address5valueySi_xtlF : $@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0) -> ()
// Mappings from '#fileID' to '#filePath':
// 'main/main.swift' => 'main.swift'
This gets converted into the following canonical SIL...
sil_stage canonical
import Builtin
import Swift
import SwiftShims
// main
sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 {
bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>):
%2 = integer_literal $Builtin.Word, 37 // user: %3
%3 = builtin "inttoptr_Word"(%2 : $Builtin.Word) : $Builtin.RawPointer // user: %4
%4 = pointer_to_address %3 : $Builtin.RawPointer to [strict] $*UInt8 // user: %7
%5 = integer_literal $Builtin.Int8, 0 // user: %6
%6 = struct $UInt8 (%5 : $Builtin.Int8) // user: %7
store %6 to %4 : $*UInt8 // id: %7
%8 = integer_literal $Builtin.Int32, 0 // user: %9
%9 = struct $Int32 (%8 : $Builtin.Int32) // user: %10
return %9 : $Int32 // id: %10
} // end sil function 'main'
// Mappings from '#fileID' to '#filePath':
// 'main/main.swift' => 'main.swift'
So the optimisation happens somewhere between raw SIL and canonical SIL. How can I track down which optimisation is doing the transform I'm not expecting?
I'm a bit more used to LLVM optimisations, where I might use -opt-bisect-limit,
but I don't see an equivalent for SIL in Swift.
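Just to illustrate the workflow I'm after, on the LLVM side I would do something along the lines of the following and then binary-search on N (not a Swift invocation, obviously):

# Run only the first N optional LLVM passes and skip the rest;
# LLVM reports which passes ran and which were skipped.
clang -O2 -mllvm -opt-bisect-limit=N file.c -c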
Can anyone give me any clues as to how I might investigate what's going on?
Thank you!
Carl