作者:XenServerBestPractic
项目:nov
// SetFpuState copies the given FPU state into the kernel for this
// vcpu via the KVM set-FPU ioctl.
func (vcpu *Vcpu) SetFpuState(state Fpu) error {
	// Translate the Go-side state into the kernel's kvm_fpu layout.
	var fpu C.struct_kvm_fpu
	for i, reg := range state.FPR {
		for j, b := range reg {
			fpu.fpr[i][j] = C.__u8(b)
		}
	}
	fpu.fcw = C.__u16(state.FCW)
	fpu.fsw = C.__u16(state.FSW)
	fpu.ftwx = C.__u8(state.FTWX)
	fpu.last_opcode = C.__u16(state.LastOpcode)
	fpu.last_ip = C.__u64(state.LastIp)
	fpu.last_dp = C.__u64(state.LastDp)
	for i, reg := range state.XMM {
		for j, b := range reg {
			fpu.xmm[i][j] = C.__u8(b)
		}
	}
	fpu.mxcsr = C.__u32(state.MXCSR)

	// Hand the structure to the kernel.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetFpu),
		uintptr(unsafe.Pointer(&fpu))); e != 0 {
		return e
	}
	return nil
}
作者:XenServerBestPractic
项目:nov
func (vm *Vm) MapUserMemory(
start Paddr,
size uint64,
mmap []byte) error {
// See NOTE above about read-only memory.
// As we will not support it for the moment,
// we do not expose it through the interface.
// Leveraging that feature will likely require
// a small amount of re-architecting in any case.
var region C.struct_kvm_userspace_memory_region
region.slot = C.__u32(vm.mem_region)
region.flags = C.__u32(0)
region.guest_phys_addr = C.__u64(start)
region.memory_size = C.__u64(size)
region.userspace_addr = C.__u64(uintptr(unsafe.Pointer(&mmap[0])))
// Execute the ioctl.
_, _, e := syscall.Syscall(
syscall.SYS_IOCTL,
uintptr(vm.fd),
uintptr(C.IoctlSetUserMemoryRegion),
uintptr(unsafe.Pointer(®ion)))
if e != 0 {
return e
}
// We're set, bump our slot.
vm.mem_region += 1
return nil
}
作者:XenServerBestPractic
项目:nov
// SetDescriptor loads a descriptor-table register (GDT or IDT) with
// the given base/limit, optionally flushing the special registers
// back to the kernel immediately.
func (vcpu *Vcpu) SetDescriptor(
	desc Descriptor,
	val DescriptorValue,
	sync bool) error {

	// Make sure our cached special registers are current.
	if err := vcpu.refreshSRegs(true); err != nil {
		return err
	}

	switch desc {
	case GDT:
		vcpu.sregs.gdt.base = C.__u64(val.Base)
		vcpu.sregs.gdt.limit = C.__u16(val.Limit)
	case IDT:
		vcpu.sregs.idt.base = C.__u64(val.Base)
		vcpu.sregs.idt.limit = C.__u16(val.Limit)
	default:
		return UnknownRegister
	}

	// Push the update to the kernel only when asked.
	if !sync {
		return nil
	}
	return vcpu.flushSRegs()
}
作者:pla
项目:pachyder
// ffiPropertySetReadonly toggles the read-only property of the btrfs
// subvolume at path.
//
// The previous implementation started from a zero flags value, so
// clearing the read-only bit operated on 0 (a no-op) and setting it
// silently wiped any other subvolume flags. Read the current flags
// first and flip only BTRFS_SUBVOL_RDONLY.
func ffiPropertySetReadonly(path string, readOnly bool) error {
	var flags C.__u64
	if err := ffiIoctl(path, C.BTRFS_IOC_SUBVOL_GETFLAGS, uintptr(unsafe.Pointer(&flags))); err != nil {
		return err
	}
	if readOnly {
		flags |= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	} else {
		flags &^= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	}
	return ffiIoctl(path, C.BTRFS_IOC_SUBVOL_SETFLAGS, uintptr(unsafe.Pointer(&flags)))
}
作者:kissthin
项目:go-btrf
// C converts the Go-side lookup arguments into the kernel's
// btrfs_ioctl_ino_lookup_args structure.
//
// The name is copied into the fixed-size path buffer, truncating if
// necessary while always leaving the final byte as NUL so the kernel
// sees a terminated C string (the previous code could fill the whole
// buffer without a terminator when len(Name) >= PATH_MAX).
func (ila *inodeLookupArgs) C() C.struct_btrfs_ioctl_ino_lookup_args {
	var args C.struct_btrfs_ioctl_ino_lookup_args
	args.objectid = C.__u64(ila.ObjectID)
	args.treeid = C.__u64(ila.TreeID)
	// Copy at most PATH_MAX-1 bytes; args.name is zero-initialized,
	// so the remainder (including the terminator) stays NUL.
	for i := 0; i < len(ila.Name) && i < C.BTRFS_INO_LOOKUP_PATH_MAX-1; i++ {
		args.name[i] = C.char(ila.Name[i])
	}
	return args
}
作者:AkihiroSud
项目:go-linuxsche
// SetAttr sets the scheduling attributes of pid via sched_setattr(2).
// A pid of 0 applies to the calling thread.
func SetAttr(pid int, attr SchedAttr) error {
	// Use keyed fields rather than positional initialization: cgo
	// struct layouts can contain padding/alignment fields, which
	// silently breaks positional literals. Field names follow the
	// linux uapi struct sched_attr.
	cAttr := C.struct_sched_attr{
		size:           C.__u32(C.SCHED_ATTR_SIZE),
		sched_policy:   C.__u32(attr.Policy),
		sched_flags:    C.__u64(attr.Flags),
		sched_nice:     C.__s32(attr.Nice),
		sched_priority: C.__u32(attr.Priority),
		sched_runtime:  C.__u64(attr.Runtime.Nanoseconds()),
		sched_deadline: C.__u64(attr.Deadline.Nanoseconds()),
		sched_period:   C.__u64(attr.Period.Nanoseconds()),
	}
	_, err := C.sched_setattr(C.pid_t(pid), &cAttr, C.uint(0))
	return err
}
作者:kissthin
项目:go-btrf
// huurr resolves the subvolume tree for dirpath and prepares a search
// key over that tree's ROOT_ITEM entries. The search itself is not
// performed: on success the function currently returns (nil, nil).
func huurr(dirpath string) ([]string, error) {
	inoArgs, err := inodeLookup(dirpath)
	if err != nil {
		return nil, err
	}

	// Constrain the search to this tree's ROOT_ITEM records.
	var key C.struct_btrfs_ioctl_search_key
	key.min_objectid = C.__u64(inoArgs.TreeID)
	key.max_objectid = C.__u64(inoArgs.TreeID)
	key.min_type = C.BTRFS_ROOT_ITEM_KEY
	key.max_type = C.BTRFS_ROOT_ITEM_KEY
	// NOTE(review): the offset/transid caps are 2^48-1 here rather
	// than the usual all-ones value — confirm this is intentional.
	key.max_offset = (1 << 48) - 1
	key.max_transid = (1 << 48) - 1

	return nil, nil
}
作者:XenServerBestPractic
项目:nov
// SetEventFd binds (or unbinds) an eventfd to a guest PIO/MMIO
// address via the KVM ioeventfd ioctl, optionally matching only a
// specific written value.
func (vm *Vm) SetEventFd(
	eventfd *EventFd,
	paddr Paddr,
	size uint,
	is_pio bool,
	unbind bool,
	has_value bool,
	value uint64) error {

	// Assemble the ioeventfd description.
	var req C.struct_kvm_ioeventfd
	req.addr = C.__u64(paddr)
	req.len = C.__u32(size)
	req.fd = C.__s32(eventfd.Fd())
	req.datamatch = C.__u64(value)

	// Translate the boolean options into flag bits.
	var flags C.__u32
	if is_pio {
		flags |= C.__u32(C.IoctlIoEventFdFlagPio)
	}
	if unbind {
		flags |= C.__u32(C.IoctlIoEventFdFlagDeassign)
	}
	if has_value {
		flags |= C.__u32(C.IoctlIoEventFdFlagDatamatch)
	}
	req.flags = flags

	// Bind / unbind the eventfd.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlIoEventFd),
		uintptr(unsafe.Pointer(&req))); e != 0 {
		return e
	}

	// Success.
	return nil
}
作者:pla
项目:pachyder
// ffiSubvolumeSnapshot creates a btrfs snapshot of the subvolume at
// src under dest (the snapshot is named filepath.Base(dest)),
// optionally marking it read-only.
func ffiSubvolumeSnapshot(src string, dest string, readOnly bool) error {
	srcDir, err := ffiOpenDir(src)
	if err != nil {
		return err
	}
	defer ffiCloseDir(srcDir)

	var args C.struct_btrfs_ioctl_vol_args_v2
	args.fd = C.__s64(ffiGetDirFd(srcDir))
	if readOnly {
		args.flags |= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	}

	// Reject names that do not fit the fixed-size kernel buffer (the
	// previous loop indexed past the array and panicked for long
	// names) and keep the final byte NUL so the kernel sees a
	// terminated C string.
	name := filepath.Base(dest)
	if len(name) >= len(args.name) {
		return fmt.Errorf("snapshot name %q is too long (max %d bytes)", name, len(args.name)-1)
	}
	for i := 0; i < len(name); i++ {
		args.name[i] = C.char(name[i])
	}

	return ffiIoctl(filepath.Dir(dest), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args)))
}
作者:XenServerBestPractic
项目:nov
// SetClock programs the in-kernel clock with the given time and
// flags via the KVM set-clock ioctl.
func (vm *Vm) SetClock(clock Clock) error {
	// Build the kernel-side clock description.
	var data C.struct_kvm_clock_data
	data.clock = C.__u64(clock.Time)
	data.flags = C.__u32(clock.Flags)

	// Execute the ioctl.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetClock),
		uintptr(unsafe.Pointer(&data))); e != 0 {
		return e
	}
	return nil
}
作者:harch
项目:docke
// subvolLimitQgroup caps the referenced size of the qgroup backing
// the subvolume at path via BTRFS_IOC_QGROUP_LIMIT.
func subvolLimitQgroup(path string, size uint64) error {
	dir, err := openDir(path)
	if err != nil {
		return err
	}
	defer closeDir(dir)

	var args C.struct_btrfs_ioctl_qgroup_limit_args
	args.lim.max_referenced = C.__u64(size)
	args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER

	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
		uintptr(unsafe.Pointer(&args)))
	if errno != 0 {
		// BUG FIX: the message previously interpolated the *C.DIR
		// handle ("dir") instead of the human-readable path.
		return fmt.Errorf("Failed to limit qgroup for %s: %v", path, errno.Error())
	}
	return nil
}
作者:XenServerBestPractic
项目:nov
// MapSpecialMemory reserves guest memory at addr for the EPT identity
// map (one page) and the TSS (three pages immediately above it), then
// registers both addresses with the kernel.
func (vm *Vm) MapSpecialMemory(addr Paddr) error {
	// The identity map requires a single reserved page.
	if err := vm.MapReservedMemory(addr, PageSize); err != nil {
		return err
	}

	// Tell the kernel where the EPT identity map lives.
	identityAddr := C.__u64(addr)
	_, _, errno := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetIdentityMapAddr),
		uintptr(unsafe.Pointer(&identityAddr)))
	if errno != 0 {
		log.Printf("Unable to set identity map to %08x!", addr)
		return errno
	}

	// The TSS requires three reserved pages just above the
	// identity map.
	if err := vm.MapReservedMemory(addr+PageSize, 3*PageSize); err != nil {
		return err
	}

	// Tell the kernel where the TSS lives. Note the address is
	// passed by value, not via a pointer.
	_, _, errno = syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetTssAddr),
		uintptr(addr+PageSize))
	if errno != 0 {
		log.Printf("Unable to set TSS ADDR to %08x!", addr+PageSize)
		return errno
	}

	// We're okay.
	return nil
}
作者:XenServerBestPractic
项目:nov
// SetMsr writes value into the model-specific register identified by
// index via the KVM set-MSRs ioctl.
func (vcpu *Vcpu) SetMsr(index uint32, value uint64) error {
	// Allocate the kvm_msrs buffer and fill in the entry on the
	// C side.
	buf := make([]byte, C.msr_size())
	C.msr_set(unsafe.Pointer(&buf[0]), C.__u32(index), C.__u64(value))

	// Execute our ioctl.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetMsrs),
		uintptr(unsafe.Pointer(&buf[0]))); e != 0 {
		return e
	}
	return nil
}
作者:XenServerBestPractic
项目:nov
// GetMsr reads the model-specific register identified by index via
// the KVM get-MSRs ioctl and returns its value.
func (vcpu *Vcpu) GetMsr(index uint32) (uint64, error) {
	// Allocate the kvm_msrs buffer and record which index to fetch.
	buf := make([]byte, C.msr_size())
	C.msr_set(unsafe.Pointer(&buf[0]), C.__u32(index), C.__u64(0))

	// Execute our ioctl.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlGetMsrs),
		uintptr(unsafe.Pointer(&buf[0]))); e != 0 {
		return 0, e
	}

	// Pull the value back out of the buffer.
	return uint64(C.msr_get(unsafe.Pointer(&buf[0]))), nil
}
作者:XenServerBestPractic
项目:nov
// Translate converts a guest virtual address into a physical address
// via the KVM translate ioctl, returning the physical address plus
// the valid, writeable and usermode bits reported by the kernel.
func (vcpu *Vcpu) Translate(
	vaddr Vaddr) (Paddr, bool, bool, bool, error) {

	// Perform the translation.
	var translation C.struct_kvm_translation
	translation.linear_address = C.__u64(vaddr)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlTranslate),
		uintptr(unsafe.Pointer(&translation)))
	if e != 0 {
		return Paddr(0), false, false, false, e
	}

	paddr := Paddr(translation.physical_address)
	valid := translation.valid != C.__u8(0)
	writeable := translation.writeable != C.__u8(0)
	// BUG FIX: usermode previously re-read the valid field instead
	// of the kernel's usermode flag.
	usermode := translation.usermode != C.__u8(0)
	return paddr, valid, writeable, usermode, nil
}
作者:XenServerBestPractic
项目:nov
// SetXcrs loads the given extended control registers into the vcpu
// via the KVM set-XCRs ioctl.
func (vcpu *Vcpu) SetXcrs(xcrs []Xcr) error {
	// Build the kernel-side parameter block.
	var regs C.struct_kvm_xcrs
	regs.nr_xcrs = C.__u32(len(xcrs))
	for i := range xcrs {
		regs.xcrs[i].xcr = C.__u32(xcrs[i].Id)
		regs.xcrs[i].value = C.__u64(xcrs[i].Value)
	}

	// Execute the ioctl.
	if _, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetXcrs),
		uintptr(unsafe.Pointer(&regs))); e != 0 {
		return e
	}
	return nil
}
作者:movich
项目:docke
// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.XFS_PROJ_QUOTA
d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
projectID, backingFsBlockDev, errno.Error())
}
return nil
}
作者:XenServerBestPractic
项目:nov
func (vcpu *Vcpu) SetControlRegister(
reg ControlRegister,
val ControlRegisterValue,
sync bool) error {
err := vcpu.refreshSRegs(true)
if err != nil {
return err
}
switch reg {
case CR0:
vcpu.sregs.cr0 = C.__u64(val)
case CR2:
vcpu.sregs.cr2 = C.__u64(val)
case CR3:
vcpu.sregs.cr3 = C.__u64(val)
case CR4:
vcpu.sregs.cr4 = C.__u64(val)
case CR8:
vcpu.sregs.cr8 = C.__u64(val)
case EFER:
vcpu.sregs.efer = C.__u64(val)
case APIC_BASE:
vcpu.sregs.apic_base = C.__u64(val)
default:
return UnknownRegister
}
if sync {
err = vcpu.flushSRegs()
if err != nil {
return err
}
}
return nil
}
作者:pla
项目:btrf
// findUpdatedFiles scans the EXTENT_DATA items of btrfs tree rootId,
// paging through BTRFS_IOC_TREE_SEARCH results 4096 items at a time,
// looking at items whose generation is at least oldestGen.
//
// It returns the tree's root generation as reported by findRootGen.
// NOTE(review): maxFound is never updated inside the loop and the
// matching items are only printed (elided below) — confirm this is
// the intended result.
func findUpdatedFiles(dir *C.DIR, rootId, oldestGen uint64) (uint64, error) {
var maxFound uint64 = 0
var args C.struct_btrfs_ioctl_search_args
// sk aliases the key embedded in args so the min_* bounds can be
// advanced in place between ioctl rounds.
var sk *C.struct_btrfs_ioctl_search_key = &args.key
var sh C.struct_btrfs_ioctl_search_header
var item *BtrfsFileExtentItem
// backup is a zero-value stand-in used when an item carries no
// payload (sh.len == 0).
var backup BtrfsFileExtentItem
var foundGen uint64 = 0
// Constrain the search to tree rootId, any objectid/offset, item
// type up to EXTENT_DATA, and transid >= oldestGen.
sk.tree_id = C.__u64(rootId)
sk.max_objectid = math.MaxUint64
sk.max_offset = math.MaxUint64
sk.max_transid = math.MaxUint64
sk.max_type = C.BTRFS_EXTENT_DATA_KEY
sk.min_transid = C.__u64(oldestGen)
// Ask for up to 4096 items per ioctl round-trip.
sk.nr_items = 4096
fd := getDirFd(dir)
maxFound, err := findRootGen(dir)
if err != nil {
return 0, err
}
for {
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return 0, fmt.Errorf("Failed to perform the search %v", errno.Error())
}
// The kernel rewrites nr_items with the count actually returned;
// zero means the search space is exhausted.
if sk.nr_items == 0 {
break
}
// Walk the packed result buffer: each entry is a search header
// followed by sh.len bytes of item payload.
var off uintptr = 0
for i := uint32(0); i < uint32(sk.nr_items); i++ {
C.memcpy(unsafe.Pointer(&sh), addptr(unsafe.Pointer(&args.buf), off), C.sizeof_struct_btrfs_ioctl_search_header)
off += C.sizeof_struct_btrfs_ioctl_search_header
if sh.len == 0 {
// Zero-length payload: fall back to the zero-value item.
item = &backup
} else {
rawItem := (*C.struct_btrfs_file_extent_item)(addptr(unsafe.Pointer(&args.buf), off))
item, err = NewBtrfsFileExtentItem(rawItem)
if err != nil {
return 0, err
}
}
foundGen = item.Generation
if sh._type == C.BTRFS_EXTENT_DATA_KEY && foundGen >= uint64(oldestGen) {
// print...
}
off += uintptr(sh.len)
// Remember the last key seen so the next round resumes from it.
sk.min_objectid = sh.objectid
sk.min_offset = sh.offset
sk.min_type = sh._type
}
// Reset the batch size and advance the minimum key just past the
// last item we consumed, carrying into objectid when the offset
// wraps; stop once both are saturated.
sk.nr_items = 4096
if sk.min_offset < math.MaxUint64 {
sk.min_offset++
} else if sk.min_objectid < math.MaxUint64 {
sk.min_objectid++
sk.min_offset = 0
sk.min_type = 0
} else {
break
}
}
return maxFound, nil
}
作者:XenServerBestPractic
项目:nov
func (vcpu *Vcpu) SetSegment(
seg Segment,
val SegmentValue,
sync bool) error {
err := vcpu.refreshSRegs(true)
if err != nil {
return err
}
switch seg {
case CS:
vcpu.sregs.cs.base = C.__u64(val.Base)
vcpu.sregs.cs.limit = C.__u32(val.Limit)
vcpu.sregs.cs.selector = C.__u16(val.Selector)
vcpu.sregs.cs._type = C.__u8(val.Type)
vcpu.sregs.cs.present = C.__u8(val.Present)
vcpu.sregs.cs.dpl = C.__u8(val.Dpl)
vcpu.sregs.cs.db = C.__u8(val.Db)
vcpu.sregs.cs.s = C.__u8(val.S)
vcpu.sregs.cs.l = C.__u8(val.L)
vcpu.sregs.cs.g = C.__u8(val.G)
vcpu.sregs.cs.avl = C.__u8(val.Avl)
vcpu.sregs.cs.unusable = C.__u8(^val.Present & 0x1)
case DS:
vcpu.sregs.ds.base = C.__u64(val.Base)
vcpu.sregs.ds.limit = C.__u32(val.Limit)
vcpu.sregs.ds.selector = C.__u16(val.Selector)
vcpu.sregs.ds._type = C.__u8(val.Type)
vcpu.sregs.ds.present = C.__u8(val.Present)
vcpu.sregs.ds.dpl = C.__u8(val.Dpl)
vcpu.sregs.ds.db = C.__u8(val.Db)
vcpu.sregs.ds.s = C.__u8(val.S)
vcpu.sregs.ds.l = C.__u8(val.L)
vcpu.sregs.ds.g = C.__u8(val.G)
vcpu.sregs.ds.avl = C.__u8(val.Avl)
vcpu.sregs.ds.unusable = C.__u8(^val.Present & 0x1)
case ES:
vcpu.sregs.es.base = C.__u64(val.Base)
vcpu.sregs.es.limit = C.__u32(val.Limit)
vcpu.sregs.es.selector = C.__u16(val.Selector)
vcpu.sregs.es._type = C.__u8(val.Type)
vcpu.sregs.es.present = C.__u8(val.Present)
vcpu.sregs.es.dpl = C.__u8(val.Dpl)
vcpu.sregs.es.db = C.__u8(val.Db)
vcpu.sregs.es.s = C.__u8(val.S)
vcpu.sregs.es.l = C.__u8(val.L)
vcpu.sregs.es.g = C.__u8(val.G)
vcpu.sregs.es.avl = C.__u8(val.Avl)
vcpu.sregs.es.unusable = C.__u8(^val.Present & 0x1)
case FS:
vcpu.sregs.fs.base = C.__u64(val.Base)
vcpu.sregs.fs.limit = C.__u32(val.Limit)
vcpu.sregs.fs.selector = C.__u16(val.Selector)
vcpu.sregs.fs._type = C.__u8(val.Type)
vcpu.sregs.fs.present = C.__u8(val.Present)
vcpu.sregs.fs.dpl = C.__u8(val.Dpl)
vcpu.sregs.fs.db = C.__u8(val.Db)
vcpu.sregs.fs.s = C.__u8(val.S)
vcpu.sregs.fs.l = C.__u8(val.L)
vcpu.sregs.fs.g = C.__u8(val.G)
vcpu.sregs.fs.avl = C.__u8(val.Avl)
vcpu.sregs.fs.unusable = C.__u8(^val.Present & 0x1)
case GS:
vcpu.sregs.gs.base = C.__u64(val.Base)
vcpu.sregs.gs.limit = C.__u32(val.Limit)
vcpu.sregs.gs.selector = C.__u16(val.Selector)
vcpu.sregs.gs._type = C.__u8(val.Type)
vcpu.sregs.gs.present = C.__u8(val.Present)
vcpu.sregs.gs.dpl = C.__u8(val.Dpl)
vcpu.sregs.gs.db = C.__u8(val.Db)
vcpu.sregs.gs.s = C.__u8(val.S)
vcpu.sregs.gs.l = C.__u8(val.L)
vcpu.sregs.gs.g = C.__u8(val.G)
vcpu.sregs.gs.avl = C.__u8(val.Avl)
vcpu.sregs.gs.unusable = C.__u8(^val.Present & 0x1)
case SS:
vcpu.sregs.ss.base = C.__u64(val.Base)
vcpu.sregs.ss.limit = C.__u32(val.Limit)
vcpu.sregs.ss.selector = C.__u16(val.Selector)
vcpu.sregs.ss._type = C.__u8(val.Type)
vcpu.sregs.ss.present = C.__u8(val.Present)
vcpu.sregs.ss.dpl = C.__u8(val.Dpl)
vcpu.sregs.ss.db = C.__u8(val.Db)
vcpu.sregs.ss.s = C.__u8(val.S)
vcpu.sregs.ss.l = C.__u8(val.L)
vcpu.sregs.ss.g = C.__u8(val.G)
vcpu.sregs.ss.avl = C.__u8(val.Avl)
vcpu.sregs.ss.unusable = C.__u8(^val.Present & 0x1)
case TR:
vcpu.sregs.tr.base = C.__u64(val.Base)
vcpu.sregs.tr.limit = C.__u32(val.Limit)
vcpu.sregs.tr.selector = C.__u16(val.Selector)
vcpu.sregs.tr._type = C.__u8(val.Type)
vcpu.sregs.tr.present = C.__u8(val.Present)
vcpu.sregs.tr.dpl = C.__u8(val.Dpl)
vcpu.sregs.tr.db = C.__u8(val.Db)
vcpu.sregs.tr.s = C.__u8(val.S)
vcpu.sregs.tr.l = C.__u8(val.L)
vcpu.sregs.tr.g = C.__u8(val.G)
//.........这里部分代码省略.........