Rollup merge of rust-lang#47964 - jcowgill:mips64-abi, r=eddyb
rustc_trans: rewrite mips64 ABI code

This PR rewrites the ABI handling code for 64-bit MIPS and should fix various FFI issues including rust-lang#47290.

To accommodate the 64-bit ABI I have had to add a new `CastTarget` variant which I've called `Chunked` (though maybe this isn't the best name). This allows an ABI to cast to an arbitrary structure of `Reg` types, which is required on MIPS, where a value may need to be cast to a structure containing a mixture of `i64` and `f64` types.
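As a rough sketch of how the new type is meant to be used (an illustration inferred from the diff below — the `struct { f64, i64 }` example is mine, not from the patch):

```rust
// Hypothetical example: describing a MIPS n64 argument of type
// `struct { f64, i64 }` with the CastTarget introduced in this PR.
// The aligned double becomes a float "prefix" chunk; the remaining
// 8 bytes are covered by the i64 "rest", yielding the LLVM type
// { double, i64 }.
let cast = CastTarget {
    prefix: [Some(RegKind::Float), None, None, None, None, None, None, None],
    prefix_chunk: Size::from_bytes(8),
    rest: Uniform { unit: Reg::i64(), total: Size::from_bytes(8) },
};
```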
kennytm committed Feb 25, 2018
2 parents 026339e + 47c33f7 commit f5116e7
Showing 3 changed files with 188 additions and 76 deletions.
106 changes: 54 additions & 52 deletions src/librustc_trans/abi.rs
```diff
@@ -40,7 +40,7 @@ use rustc::ty::layout::{self, Align, Size, TyLayout};
 use rustc::ty::layout::{HasDataLayout, LayoutOf};
 
 use libc::c_uint;
-use std::{cmp, iter};
+use std::cmp;
 
 pub use syntax::abi::Abi;
 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
@@ -279,30 +279,6 @@ impl Uniform {
     pub fn align(&self, cx: &CodegenCx) -> Align {
         self.unit.align(cx)
     }
-
-    pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
-        let llunit = self.unit.llvm_type(cx);
-
-        if self.total <= self.unit.size {
-            return llunit;
-        }
-
-        let count = self.total.bytes() / self.unit.size.bytes();
-        let rem_bytes = self.total.bytes() % self.unit.size.bytes();
-
-        if rem_bytes == 0 {
-            return Type::array(&llunit, count);
-        }
-
-        // Only integers can be really split further.
-        assert_eq!(self.unit.kind, RegKind::Integer);
-
-        let args: Vec<_> = (0..count).map(|_| llunit)
-            .chain(iter::once(Type::ix(cx, rem_bytes * 8)))
-            .collect();
-
-        Type::struct_(cx, &args, false)
-    }
 }
 
 pub trait LayoutExt<'tcx> {
@@ -405,55 +381,81 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
 }
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub enum CastTarget {
-    Uniform(Uniform),
-    Pair(Reg, Reg)
+pub struct CastTarget {
+    pub prefix: [Option<RegKind>; 8],
+    pub prefix_chunk: Size,
+    pub rest: Uniform,
 }
 
 impl From<Reg> for CastTarget {
     fn from(unit: Reg) -> CastTarget {
-        CastTarget::Uniform(Uniform::from(unit))
+        CastTarget::from(Uniform::from(unit))
     }
 }
 
 impl From<Uniform> for CastTarget {
     fn from(uniform: Uniform) -> CastTarget {
-        CastTarget::Uniform(uniform)
+        CastTarget {
+            prefix: [None; 8],
+            prefix_chunk: Size::from_bytes(0),
+            rest: uniform
+        }
     }
 }
 
 impl CastTarget {
-    pub fn size(&self, cx: &CodegenCx) -> Size {
-        match *self {
-            CastTarget::Uniform(u) => u.total,
-            CastTarget::Pair(a, b) => {
-                (a.size.abi_align(a.align(cx)) + b.size)
-                    .abi_align(self.align(cx))
-            }
+    pub fn pair(a: Reg, b: Reg) -> CastTarget {
+        CastTarget {
+            prefix: [Some(a.kind), None, None, None, None, None, None, None],
+            prefix_chunk: a.size,
+            rest: Uniform::from(b)
         }
     }
 
+    pub fn size(&self, cx: &CodegenCx) -> Size {
+        (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
+            .abi_align(self.rest.align(cx)) + self.rest.total
+    }
+
     pub fn align(&self, cx: &CodegenCx) -> Align {
-        match *self {
-            CastTarget::Uniform(u) => u.align(cx),
-            CastTarget::Pair(a, b) => {
-                cx.data_layout().aggregate_align
-                    .max(a.align(cx))
-                    .max(b.align(cx))
-            }
-        }
+        self.prefix.iter()
+            .filter_map(|x| x.map(|kind| Reg { kind: kind, size: self.prefix_chunk }.align(cx)))
+            .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
+                |acc, align| acc.max(align))
    }
 
     pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
-        match *self {
-            CastTarget::Uniform(u) => u.llvm_type(cx),
-            CastTarget::Pair(a, b) => {
-                Type::struct_(cx, &[
-                    a.llvm_type(cx),
-                    b.llvm_type(cx)
-                ], false)
-            }
-        }
+        let rest_ll_unit = self.rest.unit.llvm_type(cx);
+        let rest_count = self.rest.total.bytes() / self.rest.unit.size.bytes();
+        let rem_bytes = self.rest.total.bytes() % self.rest.unit.size.bytes();
+
+        if self.prefix.iter().all(|x| x.is_none()) {
+            // Simplify to a single unit when there is no prefix and size <= unit size
+            if self.rest.total <= self.rest.unit.size {
+                return rest_ll_unit;
+            }
+
+            // Simplify to array when all chunks are the same size and type
+            if rem_bytes == 0 {
+                return Type::array(&rest_ll_unit, rest_count);
+            }
+        }
+
+        // Create list of fields in the main structure
+        let mut args: Vec<_> =
+            self.prefix.iter().flat_map(|option_kind| option_kind.map(
+                |kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx)))
+            .chain((0..rest_count).map(|_| rest_ll_unit))
+            .collect();
+
+        // Append final integer
+        if rem_bytes != 0 {
+            // Only integers can be really split further.
+            assert_eq!(self.rest.unit.kind, RegKind::Integer);
+            args.push(Type::ix(cx, rem_bytes * 8));
+        }
+
+        Type::struct_(cx, &args, false)
     }
 }
```
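To make the new `llvm_type` lowering concrete, here is a worked example (inferred from the code above, not part of the commit):

```rust
// Illustrative only, using the types from the diff above. Given:
//   prefix       = [Some(RegKind::Integer), Some(RegKind::Float), None, ..., None]
//   prefix_chunk = 8 bytes
//   rest         = Uniform { unit: Reg::i64(), total: 12 bytes }
// llvm_type emits one field per occupied prefix slot, then splits the rest
// into 12 / 8 = 1 full i64 chunk with rem_bytes = 4, appending a final i32
// (the assert allows this split only for integer rest units):
//   { i64, double, i64, i32 }
```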
156 changes: 133 additions & 23 deletions src/librustc_trans/cabi_mips64.rs
```diff
@@ -8,50 +8,160 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
+use abi::{ArgAttribute, ArgType, CastTarget, FnType, LayoutExt, PassMode, Reg, RegKind, Uniform};
 use context::CodegenCx;
+use rustc::ty::layout::{self, Size};
 
-use rustc::ty::layout::Size;
+fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) {
+    // Always sign extend u32 values on 64-bit mips
+    if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
+        if let layout::Int(i, signed) = scalar.value {
+            if !signed && i.size().bits() == 32 {
+                if let PassMode::Direct(ref mut attrs) = arg.mode {
+                    attrs.set(ArgAttribute::SExt);
+                    return;
+                }
+            }
+        }
+    }
+
+    arg.extend_integer_width_to(bits);
+}
+
+fn bits_to_int_reg(bits: u64) -> Reg {
+    if bits <= 8 {
+        Reg::i8()
+    } else if bits <= 16 {
+        Reg::i16()
+    } else if bits <= 32 {
+        Reg::i32()
+    } else {
+        Reg::i64()
+    }
+}
+
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
-                             ret: &mut ArgType<'tcx>,
-                             offset: &mut Size) {
+fn float_reg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &ArgType<'tcx>, i: usize) -> Option<Reg> {
+    match ret.layout.field(cx, i).abi {
+        layout::Abi::Scalar(ref scalar) => match scalar.value {
+            layout::F32 => Some(Reg::f32()),
+            layout::F64 => Some(Reg::f64()),
+            _ => None
+        },
+        _ => None
+    }
+}
+
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
-        ret.extend_integer_width_to(64);
+        extend_integer_width_mips(ret, 64);
+        return;
+    }
+
+    let size = ret.layout.size;
+    let bits = size.bits();
+    if bits <= 128 {
+        // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
+        // use of float registers to structures (not unions) containing exactly one or two
+        // float fields.
+
+        if let layout::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
+            if ret.layout.fields.count() == 1 {
+                if let Some(reg) = float_reg(cx, ret, 0) {
+                    ret.cast_to(reg);
+                    return;
+                }
+            } else if ret.layout.fields.count() == 2 {
+                if let Some(reg0) = float_reg(cx, ret, 0) {
+                    if let Some(reg1) = float_reg(cx, ret, 1) {
+                        ret.cast_to(CastTarget::pair(reg0, reg1));
+                        return;
+                    }
+                }
+            }
+        }
+
+        // Cast to a uniform int structure
+        ret.cast_to(Uniform {
+            unit: bits_to_int_reg(bits),
+            total: size
+        });
     } else {
         ret.make_indirect();
-        *offset += cx.tcx.data_layout.pointer_size;
     }
 }
 
-fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if !arg.layout.is_aggregate() {
+        extend_integer_width_mips(arg, 64);
+        return;
+    }
+
     let dl = &cx.tcx.data_layout;
     let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
+    let mut prefix = [None; 8];
+    let mut prefix_index = 0;
 
-    if arg.layout.is_aggregate() {
-        arg.cast_to(Uniform {
-            unit: Reg::i64(),
-            total: size
-        });
-        if !offset.is_abi_aligned(align) {
-            arg.pad_with(Reg::i64());
+    match arg.layout.fields {
+        layout::FieldPlacement::Array { .. } => {
+            // Arrays are passed indirectly
+            arg.make_indirect();
+            return;
         }
-    } else {
-        arg.extend_integer_width_to(64);
-    }
+        layout::FieldPlacement::Union(_) => {
+            // Unions are always treated as a series of 64-bit integer chunks
+        },
+        layout::FieldPlacement::Arbitrary { .. } => {
+            // Structures are split up into a series of 64-bit integer chunks, but any aligned
+            // doubles not part of another aggregate are passed as floats.
+            let mut last_offset = Size::from_bytes(0);
+
+            for i in 0..arg.layout.fields.count() {
+                let field = arg.layout.field(cx, i);
+                let offset = arg.layout.fields.offset(i);
+
+                // We only care about aligned doubles
+                if let layout::Abi::Scalar(ref scalar) = field.abi {
+                    if let layout::F64 = scalar.value {
+                        if offset.is_abi_aligned(dl.f64_align) {
+                            // Insert enough integers to cover [last_offset, offset)
+                            assert!(last_offset.is_abi_aligned(dl.f64_align));
+                            for _ in 0..((offset - last_offset).bits() / 64)
+                                .min((prefix.len() - prefix_index) as u64) {
+
+                                prefix[prefix_index] = Some(RegKind::Integer);
+                                prefix_index += 1;
+                            }
+
+                            if prefix_index == prefix.len() {
+                                break;
+                            }
+
+                            prefix[prefix_index] = Some(RegKind::Float);
+                            prefix_index += 1;
+                            last_offset = offset + Reg::f64().size;
+                        }
+                    }
+                }
+            }
+        }
+    };
 
-    *offset = offset.abi_align(align) + size.abi_align(align);
+    // Extract first 8 chunks as the prefix
+    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+    arg.cast_to(CastTarget {
+        prefix: prefix,
+        prefix_chunk: Size::from_bytes(8),
+        rest: Uniform { unit: Reg::i64(), total: rest_size }
+    });
 }
 
 pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(cx, &mut fty.ret, &mut offset);
+        classify_ret_ty(cx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(cx, arg, &mut offset);
+        classify_arg_ty(cx, arg);
     }
 }
```
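A few consequences of the new n64 classification, spelled out (these examples are inferred from the code above rather than stated anywhere in the commit):

```rust
// Return values (illustrative):
//   struct { f32, f32 }   -> CastTarget::pair(Reg::f32(), Reg::f32());
//                            exactly one or two float fields use float registers
//   struct { f64, i32 }   -> Uniform { unit: Reg::i64(), total: 16 bytes };
//                            the second field is not a float, so the whole
//                            struct falls back to integer chunks
//   aggregates > 128 bits -> returned indirectly
//
// Arguments (illustrative):
//   arrays are always passed indirectly;
//   each aligned f64 field becomes a RegKind::Float prefix chunk, and the
//   remainder of the struct is covered by 64-bit integer chunks in `rest`.
```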
2 changes: 1 addition & 1 deletion src/librustc_trans/cabi_x86_64.rs
```diff
@@ -171,7 +171,7 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
     let mut target = CastTarget::from(lo);
     if size > offset {
         if let Some(hi) = reg_component(cls, &mut i, size - offset) {
-            target = CastTarget::Pair(lo, hi);
+            target = CastTarget::pair(lo, hi);
         }
     }
     assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None);
```