configure fails for libstdc++-v3 under hpux 10.20 DCE threads
John David Anglin
dave@hiauly1.hia.nrc.ca
Fri Nov 17 11:43:00 GMT 2000
> On Fri, Nov 17, 2000 at 12:30:26PM -0500, John David Anglin wrote:
> >
> > checking for threads package to use... threads-no.h
>
> So, it's not DCE threads, it's no threads at all. Which is okay.
I wasn't thinking. Under hpux 10.20, when DCE threads is installed,
you get a multilib implementation: one library built without threads
and one built with threads.
>
> > checking for atomicity.h... configure: error: Atomic locking requested, but is
> > an unknown thread package and atomic operations are not present in the CPU
>
> Someday I need to come up with a better error message there.
>
> The library needs to know how to do atomic integer operations, and
> if it doesn't have any information for a particular target, that's
> what happens. For some reason we give an error rather than choosing
> the generic implementation. (Maybe because the generic version isn't
> necessarily atomic?)
If I recall correctly, this is a problem on the pa due to the lack of
suitable instructions for the purpose. I am enclosing atomic.h and
system.h from the parisc-linux project in the hope that they give some
clues about how to solve the problem.
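The crux is that ldcw is the only atomic memory operation the pa has,
so an atomic add has to be built out of a spinlock. For what it's
worth, here is a minimal, untested sketch of what an hppa atomicity.h
might look like; it assumes the __exchange_and_add/__atomic_add
interface that libstdc++-v3 uses, and the lock handling is modelled on
the attached headers rather than taken from any working port:

typedef int _Atomic_word;

/* ldcw needs a 16-byte aligned word; it atomically loads the word and
   stores zero, so reading a nonzero value means we took the lock.  A
   plain store of 1 releases it.  This single global lock serializes
   every atomic add in the program.  */
static volatile int __atomicity_lock __attribute__ ((aligned (16))) = 1;

static inline int
__ldcw (volatile int *__a)
{
  int __ret;
  __asm__ __volatile__ ("ldcw 0(%1),%0"
                        : "=r" (__ret) : "r" (__a) : "memory");
  return __ret;
}

static inline _Atomic_word
__exchange_and_add (volatile _Atomic_word *__mem, int __val)
{
  _Atomic_word __result;
  while (__ldcw (&__atomicity_lock) == 0)
    ;                                   /* spin until we own the lock */
  __result = *__mem;
  *__mem = __result + __val;
  __atomicity_lock = 1;                 /* release */
  return __result;
}

static inline void
__atomic_add (volatile _Atomic_word *__mem, int __val)
{
  (void) __exchange_and_add (__mem, __val);
}

Unlike the kernel code below, this can't disable interrupts, so it is
only safe against other threads, not against signal handlers.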
Dave
--
J. David Anglin dave.anglin@nrc.ca
National Research Council of Canada (613) 990-0752 (FAX: 952-6605)
#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_
#include <asm/system.h>
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* And probably incredibly slow on parisc. OTOH, we don't
* have to write any serious assembly. prumpf
*/
#ifdef CONFIG_SMP
/* we have an array of spinlocks for our atomic_ts, and a hash function
* to get the right index */
# define ATOMIC_HASH_SIZE 1
# define ATOMIC_HASH(a) (&__atomic_hash[0])
extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
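/* (Note: with ATOMIC_HASH_SIZE == 1 the hash degenerates to a single
 * global lock; a larger table would merely reduce lock contention,
 * not change the semantics.) */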
/* copied from <asm/spinlock.h> and modified */
# define SPIN_LOCK(x) \
        do { while (__ldcw(&(x)->lock) == 0); } while (0)
# define SPIN_UNLOCK(x) \
        do { (x)->lock = 1; } while (0)
#else
# define ATOMIC_HASH_SIZE 1
# define ATOMIC_HASH(a) (0)
/* copied from <linux/spinlock.h> and modified */
# define SPIN_LOCK(x) (void)(x)
# define SPIN_UNLOCK(x) do { } while(0)
#endif
/* copied from <linux/spinlock.h> and modified */
#define SPIN_LOCK_IRQSAVE(lock, flags) \
        do { local_irq_save(flags); SPIN_LOCK(lock); } while (0)
#define SPIN_UNLOCK_IRQRESTORE(lock, flags) \
        do { SPIN_UNLOCK(lock); local_irq_restore(flags); } while (0)
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h */
typedef struct {
        volatile int counter;
} atomic_t;
/* It's possible to reduce all atomic operations to __atomic_add_return,
 * __atomic_set and __atomic_read (the latter is there only for
 * consistency). */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
        int ret;
        unsigned long flags;

        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);
        ret = (v->counter += i);
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
        return ret;
}

static __inline__ void __atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);
        v->counter = i;
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
}

static __inline__ int __atomic_read(atomic_t *v)
{
        return v->counter;
}
/* exported interface */
#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
#define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_set(v,i) (__atomic_set((v),i))
#define atomic_read(v) (__atomic_read(v))
#define ATOMIC_INIT(i) { (i) }
#endif
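For illustration, here is a hypothetical caller of the interface
above, using it for reference counting (free_resource() is a made-up
cleanup hook, not part of the header):

static atomic_t refcount = ATOMIC_INIT(1);

void get_ref(void)
{
        atomic_inc(&refcount);
}

void put_ref(void)
{
        /* atomic_dec_and_test() is true exactly once, when the
           count reaches zero */
        if (atomic_dec_and_test(&refcount))
                free_resource();
}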
-----------------------------Cut-----------------------------------------------
#ifndef __PARISC_SYSTEM_H
#define __PARISC_SYSTEM_H
#include <asm/psw.h>
struct task_struct;
extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
#define prepare_to_switch() do { } while(0)
#define switch_to(prev, next, last) do { \
        (last) = _switch_to(prev, next); \
} while(0)
/* borrowed this from sparc64 -- probably the SMP case is hosed for us */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
/* This is simply the barrier() macro from linux/kernel.h, but when
 * serial.c, via tqueue.h, uses smp_mb(), which is defined using
 * barrier(), linux/kernel.h hasn't been included yet, so it fails;
 * hence the macro is repeated here.
 */
#define smp_mb() __asm__ __volatile__("":::"memory")
#define smp_rmb() __asm__ __volatile__("":::"memory")
#define smp_wmb() __asm__ __volatile__("":::"memory")
#endif
/* interrupt control */
#define __save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define __restore_flags(x) __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory")
#define __cli() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define __sti() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define local_irq_save(x) \
        __asm__ __volatile__("rsm %1,%0" : "=r" (x) : "i" (PSW_I) : "memory" )
#define local_irq_restore(x) \
        __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
#ifdef CONFIG_SMP
#else
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#endif
#define mfctl(reg) ({ \
        unsigned long cr; \
        __asm__ __volatile__( \
                "mfctl " #reg ",%0" : \
                "=r" (cr) \
        ); \
        cr; \
})

#define mtctl(gr, cr) \
        __asm__ __volatile__("mtctl %0,%1" \
                : /* no outputs */ \
                : "r" (gr), "i" (cr))
/* these are here to demystify the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems -PB */
#define get_eiem() mfctl(15)
static inline void set_eiem(unsigned long val)
{
        mtctl(val, 15);
}
#define mfsp(reg) ({ \
        unsigned long cr; \
        __asm__ __volatile__( \
                "mfsp " #reg ",%0" : \
                "=r" (cr) \
        ); \
        cr; \
})

#define mtsp(gr, cr) \
        __asm__ __volatile__("mtsp %0,%1" \
                : /* no outputs */ \
                : "r" (gr), "i" (cr))
#define mb() __asm__ __volatile__ ("sync" : : :"memory")
#define wmb() mb()
extern unsigned long __xchg(unsigned long, unsigned long *, int);
#define xchg(ptr,x) \
        (__typeof__(*(ptr)))__xchg((unsigned long)(x), (unsigned long *)(ptr), sizeof(*(ptr)))
/* LDCW, the only atomic read-write operation PA-RISC has. Sigh. */
#define __ldcw(a) ({ \
        unsigned __ret; \
        __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
        __ret; \
})
#ifdef CONFIG_SMP
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
        volatile unsigned int __attribute__((aligned(16))) lock;
} spinlock_t;
#endif
#endif
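To make the connection between the two files explicit: SPIN_LOCK in
atomic.h spins on __ldcw until it reads a nonzero value. The same
pattern in open code looks like this (a sketch; the names are mine,
and the lock word must start out as 1, i.e. free, and be 16-byte
aligned for ldcw):

static volatile unsigned int lock_word __attribute__((aligned(16))) = 1;

void enter_critical(void)
{
        while (__ldcw(&lock_word) == 0)
                ;                       /* ldcw stored 0: lock is held */
        /* ... critical section ... */
        lock_word = 1;                  /* any store of 1 releases it */
}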