Academic Integrity: tutoring, explanations, and feedback — we don’t complete graded work or submit on a student’s behalf.

How mutual exclusion is achieved in the xv6 operating system, according to the source code of spinlock.c

ID: 3831931 • Letter: H

Question

How the mutual exclusion is achieved in the XV6 operating system according to the source code: spinlock.c

spinlock.c

// Mutual exclusion spin locks.

#include "types.h"

#include "defs.h"

#include "param.h"

#include "x86.h"

#include "memlayout.h"

#include "mmu.h"

#include "proc.h"

#include "spinlock.h"

// Initialize spinlock lk: record its debug name, mark it unlocked,
// and clear the owning-CPU debug field.
void
initlock(struct spinlock *lk, char *name)
{
  lk->cpu = 0;      // no CPU holds the lock yet
  lk->locked = 0;   // lock starts out free
  lk->name = name;  // name is kept only for debugging/diagnostics
}

// Acquire the lock.

// Loops (spins) until the lock is acquired.

// Holding a lock for a long time may cause

// other CPUs to waste time spinning to acquire it.

// Acquire spinlock lk, spinning until it is free.
// Panics if this CPU already holds lk (would deadlock).
// On return, interrupts are disabled (via pushcli) and lk is held.
void

acquire(struct spinlock *lk)

{

pushcli(); // disable interrupts to avoid deadlock.

// Re-acquiring a lock this CPU already holds would spin forever
// with interrupts off, so panic instead.
if(holding(lk))

panic("acquire");

// The xchg is atomic.

// xchg atomically swaps 1 into lk->locked and returns the previous
// value; a return of 0 means the lock was free and is now ours.
// This atomic read-modify-write is what provides mutual exclusion.
while(xchg(&lk->locked, 1) != 0)

;

// Tell the C compiler and the processor to not move loads or stores

// past this point, to ensure that the critical section's memory

// references happen after the lock is acquired.

__sync_synchronize();

// Record info about lock acquisition for debugging.

// cpu is the per-CPU structure pointer (declared in proc.h); storing
// it lets holding() identify the owner.
lk->cpu = cpu;

// Walk the %ebp frame chain starting from this call's stack slot and
// record up to 10 return addresses for debugging.
getcallerpcs(&lk, lk->pcs);

}

// Release the lock.

// Release spinlock lk. Panics if this CPU does not hold it.
// Clears the debug info, issues a full memory barrier so every store
// made in the critical section is visible to other CPUs before the
// lock appears free, then re-enables interrupts via popcli if this
// was the outermost lock on this CPU.
void

release(struct spinlock *lk)

{

if(!holding(lk))

panic("release");

// Clear debug bookkeeping while the lock is still held.
lk->pcs[0] = 0;

lk->cpu = 0;

// Tell the C compiler and the processor to not move loads or stores

// past this point, to ensure that all the stores in the critical

// section are visible to other cores before the lock is released.

// Both the C compiler and the hardware may re-order loads and

// stores; __sync_synchronize() tells them both to not re-order.

__sync_synchronize();

// Release the lock.

// NOTE(review): a plain C store is used here; later xv6 revisions use
// an inline movl instruction to guarantee the release is a single
// atomic store — confirm this is safe with the project's toolchain.
lk->locked = 0;

popcli();

}

// Record the current call stack in pcs[] by following the %ebp chain.

// Record up to 10 caller return addresses in pcs[] by following the
// saved-%ebp chain that starts two words below v (the address of the
// caller's first argument on an x86 stack). Unused slots are zeroed.
void

getcallerpcs(void *v, uint pcs[])

{

uint *ebp;

int i;

// On x86, the first argument sits at ebp+8, so &arg - 2 words
// recovers the caller's saved frame pointer slot.
ebp = (uint*)v - 2;

for(i = 0; i < 10; i++){

// Stop when the chain leaves the kernel address range or hits a
// sentinel value; these mark the bottom of the call stack.
if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)

break;

pcs[i] = ebp[1]; // saved %eip

ebp = (uint*)ebp[0]; // saved %ebp

}

// Zero-fill any remaining slots so stale entries never appear.
for(; i < 10; i++)

pcs[i] = 0;

}

// Check whether this cpu is holding the lock.

// Return nonzero iff the lock is held and the holder is this CPU.
int
holding(struct spinlock *lock)
{
  if(!lock->locked)
    return 0;                 // lock is free: nobody holds it
  return lock->cpu == cpu;    // held — but is the owner this CPU?
}

// Pushcli/popcli are like cli/sti except that they are matched:

// it takes two popcli to undo two pushcli. Also, if interrupts

// are off, then pushcli, popcli leaves them off.

// pushcli: disable interrupts and increment this CPU's cli nesting
// count. On the outermost call, remember whether interrupts were
// enabled beforehand so popcli can restore that state later.
void

pushcli(void)

{

int eflags;

// Read the interrupt flag BEFORE cli(), otherwise the prior state
// would be lost.
eflags = readeflags();

cli();

if(cpu->ncli == 0)

cpu->intena = eflags & FL_IF;

cpu->ncli += 1;

}

// popcli: undo one pushcli. Decrements the per-CPU cli nesting count
// and re-enables interrupts only when the count reaches zero AND
// interrupts were enabled before the outermost pushcli.
void

popcli(void)

{

// Interrupts must still be off here; popcli is only legal while the
// matching pushcli's cli() is in effect.
if(readeflags()&FL_IF)

panic("popcli - interruptible");

// More popcli calls than pushcli calls indicates a kernel bug.
if(--cpu->ncli < 0)

panic("popcli");

if(cpu->ncli == 0 && cpu->intena)

sti();

}

Explanation / Answer

#include "types.h"

#include "defs.h"

#include "param.h"

#include "x86.h"

#include "memlayout.h"

#include "mmu.h"

#include "proc.h"

#include "spinlock.h"

// Set up spinlock lk in the unlocked state with debug name `name`.
void
initlock(struct spinlock *lk, char *name)
{
  lk->cpu = 0;      // debug field: owning CPU, none yet
  lk->locked = 0;   // the lock begins free
  lk->name = name;  // human-readable name for diagnostics
}

// Acquire the lock.

// Loops (spins) until the lock is acquired.

// Holding a lock for a long time may cause

// other CPUs to waste time spinning to acquire it.

// Acquire spinlock lk, spinning until it is free.
// Panics if this CPU already holds lk (would deadlock).
// On return, interrupts are disabled (via pushcli) and lk is held.
void

acquire(struct spinlock *lk)

{

pushcli(); // disable interrupts to avoid deadlock.

// Re-acquiring a lock this CPU already holds would spin forever
// with interrupts off, so panic instead.
if(holding(lk))

panic("acquire");

// The xchg is atomic.

// xchg atomically swaps 1 into lk->locked and returns the previous
// value; a return of 0 means the lock was free and is now ours.
// This atomic read-modify-write is what provides mutual exclusion.
while(xchg(&lk->locked, 1) != 0)

;

// Tell the C compiler and the processor to not move loads or stores

// past this point, to ensure that the critical section's memory

// references happen after the lock is acquired.

__sync_synchronize();

// Record info about lock acquisition for debugging.

// cpu is the per-CPU structure pointer (declared in proc.h); storing
// it lets holding() identify the owner.
lk->cpu = cpu;

// Walk the %ebp frame chain starting from this call's stack slot and
// record up to 10 return addresses for debugging.
getcallerpcs(&lk, lk->pcs);

}

// Release the lock.

// Release spinlock lk. Panics if this CPU does not hold it.
// Clears the debug info, issues a full memory barrier so every store
// made in the critical section is visible to other CPUs before the
// lock appears free, then re-enables interrupts via popcli if this
// was the outermost lock on this CPU.
void

release(struct spinlock *lk)

{

if(!holding(lk))

panic("release");

// Clear debug bookkeeping while the lock is still held.
lk->pcs[0] = 0;

lk->cpu = 0;

// Tell the C compiler and the processor to not move loads or stores

// past this point, to ensure that all the stores in the critical

// section are visible to other cores before the lock is released.

// Both the C compiler and the hardware may re-order loads and

// stores; __sync_synchronize() tells them both to not re-order.

__sync_synchronize();

// Release the lock.

// NOTE(review): a plain C store is used here; later xv6 revisions use
// an inline movl instruction to guarantee the release is a single
// atomic store — confirm this is safe with the project's toolchain.
lk->locked = 0;

popcli();

}

// Record the current call stack in pcs[] by following the %ebp chain.

// Record up to 10 caller return addresses in pcs[] by following the
// saved-%ebp chain that starts two words below v (the address of the
// caller's first argument on an x86 stack). Unused slots are zeroed.
void

getcallerpcs(void *v, uint pcs[])

{

uint *ebp;

int i;

// On x86, the first argument sits at ebp+8, so &arg - 2 words
// recovers the caller's saved frame pointer slot.
ebp = (uint*)v - 2;

for(i = 0; i < 10; i++){

// Stop when the chain leaves the kernel address range or hits a
// sentinel value; these mark the bottom of the call stack.
if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)

break;

pcs[i] = ebp[1]; // saved %eip

ebp = (uint*)ebp[0]; // saved %ebp

}

// Zero-fill any remaining slots so stale entries never appear.
for(; i < 10; i++)

pcs[i] = 0;

}

// Check whether this cpu is holding the lock.

// Tell whether this CPU currently holds the given lock.
int
holding(struct spinlock *lock)
{
  if(lock->locked == 0)
    return 0;               // unheld lock: no owner at all
  return lock->cpu == cpu;  // otherwise, check the recorded owner
}

// Pushcli/popcli are like cli/sti except that they are matched:

// it takes two popcli to undo two pushcli. Also, if interrupts

// are off, then pushcli, popcli leaves them off.

// pushcli: disable interrupts and increment this CPU's cli nesting
// count. On the outermost call, remember whether interrupts were
// enabled beforehand so popcli can restore that state later.
void

pushcli(void)

{

int eflags;

// Read the interrupt flag BEFORE cli(), otherwise the prior state
// would be lost.
eflags = readeflags();

cli();

if(cpu->ncli == 0)

cpu->intena = eflags & FL_IF;

cpu->ncli += 1;

}

// popcli: undo one pushcli. Decrements the per-CPU cli nesting count
// and re-enables interrupts (sti) only when the count returns to zero
// AND interrupts were enabled before the outermost pushcli ran.
// (Fix: the closing brace of this function was missing in the pasted
// text, leaving the translation unit syntactically invalid.)
void
popcli(void)
{
  // Interrupts must still be off here; popcli is only legal while the
  // matching pushcli's cli() is in effect.
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  // More popcli calls than pushcli calls indicates a kernel bug.
  if(--cpu->ncli < 0)
    panic("popcli");
  // Re-enable interrupts only at the outermost level, and only if
  // they were on before the first pushcli.
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}

Hire Me For All Your Tutoring Needs
Integrity-first tutoring: clear explanations, guidance, and feedback.
Drop an Email at
drjack9650@gmail.com
Chat Now And Get Quote