By default, when you import a model, the image extents are increased to include the entire model, which essentially undoes the limit that you set via the reference geometry. You can disable this by changing a conversion parameter.
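Specifically, the relevant line (as used in the script below, where seg is the segmentation node) is:
seg.GetSegmentation().SetConversionParameter("Crop to reference image geometry", "1")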
After some cleanup and simplification of the code, here is a complete working version:
inputModelFile = r"c:\Users\andra\OneDrive\SpinePhantom2Model.stl"
outputDir = r"c:\tmp\20210211-model-rasterization"
outputVolumeLabelValue = 255
inPlaneResolutionDpi = 400
planeThicknessMm = 0.1
outputVolumeMarginMm = [0, 0, 0]  # extra margin around the model along each axis, in mm
import math
import numpy as np
try:
    import imageio
except ImportError:
    pip_install("imageio")
    import imageio
imageSpacingMm = [25.4/inPlaneResolutionDpi, 25.4/inPlaneResolutionDpi, planeThicknessMm]
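# Load the input model and get its axis-aligned bounding box in mm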
inputModel = slicer.util.loadModel(inputModelFile)
bounds = np.zeros(6)
inputModel.GetBounds(bounds)
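# Output image size (in voxels) and origin, computed from the model bounds plus the requested margin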
imageSize = [ int(math.ceil((bounds[axis*2+1] - bounds[axis*2] + 2 * outputVolumeMarginMm[axis]) / imageSpacingMm[axis])) for axis in range(3) ]
imageOrigin = [ bounds[axis*2] - outputVolumeMarginMm[axis] for axis in range(3) ]
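# Single-slice image that will serve as the reference geometry, so only one slice is rasterized at a time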
sliceImageData = vtk.vtkImageData()
sliceImageData.SetDimensions(imageSize[0], imageSize[1], 1)
sliceImageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
sliceImageData.GetPointData().GetScalars().Fill(0)
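# Wrap the slice image in a labelmap volume node so it can be used as reference geometry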
sliceNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
sliceNode.SetOrigin(imageOrigin)
sliceNode.SetSpacing(imageSpacingMm)
sliceNode.SetAndObserveImageData(sliceImageData)
sliceNode.CreateDefaultDisplayNodes()
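# Create the segmentation node and use the single-slice volume as its reference image geometry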
seg = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLSegmentationNode')
seg.SetReferenceImageGeometryParameterFromVolumeNode(sliceNode)
# restrict rasterization to the reference geometry (single frame at a time)
seg.GetSegmentation().SetConversionParameter("Crop to reference image geometry", "1")
seg.GetSegmentation().SetSourceRepresentationName("Closed surface")
slicer.modules.segmentations.logic().ImportModelToSegmentationNode(inputModel, seg)
segmentId = seg.GetSegmentation().GetNthSegmentID(0)
seg.CreateBinaryLabelmapRepresentation()
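# Step the reference geometry through the volume one slice at a time and export each rasterized slice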
for sliceIndex in range(imageSize[2]):
    # Move the reference geometry to the next slice position and re-rasterize
    sliceNode.SetOrigin(imageOrigin[0], imageOrigin[1], imageOrigin[2] + sliceIndex * imageSpacingMm[2])
    seg.SetReferenceImageGeometryParameterFromVolumeNode(sliceNode)
    seg.GetSegmentation().CreateRepresentation('Binary labelmap', True)  # force immediate re-creation of the binary labelmap representation
    # Extract the rasterized slice as a 2D uint8 array and write it to a TIFF file
    outputSliceImageArray = np.squeeze(slicer.util.arrayFromSegmentBinaryLabelmap(seg, segmentId)).astype('uint8')
    outputSliceImageArray[outputSliceImageArray > 0] = outputVolumeLabelValue
    filename = f'{outputDir}/slice_{sliceIndex:03}.tif'
    imageio.imwrite(filename, outputSliceImageArray)
    print(filename)
    slicer.app.processEvents()  # update console
Generating thousands of slices takes a while if the conversion is done one slice at a time. You could probably speed it up by converting 10-20 slices per batch instead.
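As a rough, untested sketch of that batching idea: it reuses the variables from the script above, assumes the cropped binary labelmap exactly matches the reference geometry extent, and uses an illustrative batchSize value.
batchSize = 16  # illustrative value; try 10-20 slices per batch
# Reference geometry that is batchSize slices thick instead of a single slice
batchImageData = vtk.vtkImageData()
batchImageData.SetDimensions(imageSize[0], imageSize[1], batchSize)
batchImageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
batchImageData.GetPointData().GetScalars().Fill(0)
batchNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
batchNode.SetSpacing(imageSpacingMm)
batchNode.SetAndObserveImageData(batchImageData)
numberOfBatches = int(math.ceil(imageSize[2] / batchSize))
for batchIndex in range(numberOfBatches):
    firstSliceIndex = batchIndex * batchSize
    # Move the batch-thick reference geometry to the next group of slices and re-rasterize
    batchNode.SetOrigin(imageOrigin[0], imageOrigin[1], imageOrigin[2] + firstSliceIndex * imageSpacingMm[2])
    seg.SetReferenceImageGeometryParameterFromVolumeNode(batchNode)
    seg.GetSegmentation().CreateRepresentation('Binary labelmap', True)
    batchArray = slicer.util.arrayFromSegmentBinaryLabelmap(seg, segmentId).astype('uint8')
    batchArray[batchArray > 0] = outputVolumeLabelValue
    # Write each slice of the batch to its own file, same naming as the single-slice version
    for sliceInBatch in range(batchArray.shape[0]):
        sliceIndex = firstSliceIndex + sliceInBatch
        if sliceIndex >= imageSize[2]:
            break
        imageio.imwrite(f'{outputDir}/slice_{sliceIndex:03}.tif', batchArray[sliceInBatch])
    slicer.app.processEvents()  # update console
Writing the batch array out slice by slice keeps the output files identical to the one-slice-at-a-time version, so only the conversion overhead changes.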