or1k/trunk/linux/linux-2.4/drivers/acpi/osl.c (rev 1774)
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 1.1.1.1 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi.h>

#ifdef CONFIG_ACPI_EFI
#include <linux/efi.h>
u64 efi_mem_attributes (u64 phys_addr);
#endif


#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME        ("osl")

#define PREFIX          "ACPI: "
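
/*
 * Deferred procedure call descriptor: acpi_os_queue_for_execution() packs
 * the callback and its argument into one of these (with a tq_struct
 * allocated in the same kmalloc block) and hands it to the kernel task
 * queue; acpi_os_execute_deferred() then runs the callback and frees it.
 */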
struct acpi_os_dpc
{
    OSD_EXECUTION_CALLBACK  function;
    void                    *context;
};


#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
/* stuff for debugger support */
int acpi_in_debugger;
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER*/

static int acpi_irq_irq;
static OSD_HANDLER acpi_irq_handler;
static void *acpi_irq_context;

acpi_status
acpi_os_initialize(void)
{
        /*
         * Initialize PCI configuration space access, as we'll need to access
         * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
         */
#ifdef CONFIG_ACPI_PCI
        pcibios_config_init();
        if (!pci_config_read || !pci_config_write) {
                printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n");
                return AE_NULL_ENTRY;
        }
#endif

        return AE_OK;
}

acpi_status
acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        return AE_OK;
}

void
acpi_os_printf(const char *fmt,...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

void
acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk("%s", buffer);
        }
#else
        printk("%s", buffer);
#endif
}

void *
acpi_os_allocate(acpi_size size)
{
        return kmalloc(size, GFP_KERNEL);
}

void
acpi_os_free(void *ptr)
{
        kfree(ptr);
}
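
/*
 * Locate the RSDP.  On EFI systems its address comes straight from the
 * EFI system table (the ACPI 2.0 entry is preferred over the ACPI 1.0
 * one); otherwise ACPICA's acpi_find_root_pointer() scans the legacy
 * BIOS areas of low memory for it.
 */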
acpi_status
acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
#ifdef CONFIG_ACPI_EFI
        addr->pointer_type = ACPI_PHYSICAL_POINTER;
        if (efi.acpi20)
                addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi20);
        else if (efi.acpi)
                addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi);
        else {
                printk(KERN_ERR PREFIX "System description tables not found\n");
                return AE_NOT_FOUND;
        }
#else
        if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) {
                printk(KERN_ERR PREFIX "System description tables not found\n");
                return AE_NOT_FOUND;
        }
#endif /*CONFIG_ACPI_EFI*/

        return AE_OK;
}
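
/*
 * Map/unmap physical memory for ACPICA.  With EFI the firmware memory map
 * tells us whether the region is ordinary write-back RAM (already covered
 * by the kernel's linear mapping) or something that must go through
 * ioremap(); without EFI everything is ioremap()ed, and physical addresses
 * that don't fit in an unsigned long are rejected.
 */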
acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void **virt)
{
#ifdef CONFIG_ACPI_EFI
        if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
                *virt = phys_to_virt(phys);
        } else {
                *virt = ioremap(phys, size);
        }
#else
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return AE_BAD_PARAMETER;
        }
        /*
         * ioremap checks to ensure this is in reserved space
         */
        *virt = ioremap((unsigned long) phys, size);
#endif

        if (!*virt)
                return AE_NO_MEMORY;

        return AE_OK;
}

void
acpi_os_unmap_memory(void *virt, acpi_size size)
{
        iounmap(virt);
}

acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}

#define ACPI_MAX_OVERRIDE_LEN 100

static char __initdata acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
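
/*
 * Let the "acpi_os_name=" boot parameter (stored in acpi_os_name above)
 * override the value the interpreter reports for the predefined _OS_
 * object (see acpi_os_name_setup() at the end of this file).
 */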
acpi_status
acpi_os_predefined_override (const struct acpi_predefined_names *init_val,
                             acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition: %s\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        return AE_OK;
}

acpi_status
acpi_os_table_override (struct acpi_table_header *existing_table,
                        struct acpi_table_header **new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;
        return AE_OK;
}

static void
acpi_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        (*acpi_irq_handler)(acpi_irq_context);
}

acpi_status
acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context)
{
        /*
         * Ignore the irq from the core, and use the value in our copy of the
         * FADT. It may not be the same if an interrupt source override exists
         * for the SCI.
         */
        irq = acpi_fadt.sci_int;

#ifdef CONFIG_IA64
        irq = acpi_irq_to_vector(irq);
        if (irq < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI interrupt %d) not registered\n",
                       acpi_fadt.sci_int);
                return AE_OK;
        }
#endif
        acpi_irq_irq = irq;
        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                return AE_NOT_ACQUIRED;
        }

        return AE_OK;
}

acpi_status
acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler)
{
        if (acpi_irq_handler) {
#ifdef CONFIG_IA64
                irq = acpi_irq_to_vector(irq);
#endif
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
        }

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void
acpi_os_sleep(u32 sec, u32 ms)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(HZ * sec + (ms * HZ) / 1000);
}
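
/*
 * Busy-wait for short delays where sleeping is not an option (unlike
 * acpi_os_sleep() above).  The delay is chopped into 1 ms udelay() chunks,
 * and touch_nmi_watchdog() keeps the NMI watchdog quiet during long stalls.
 */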
void
acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

acpi_status
acpi_os_read_port(
        acpi_io_address port,
        u32             *value,
        u32             width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        switch (width)
        {
        case 8:
                *(u8*)  value = inb(port);
                break;
        case 16:
                *(u16*) value = inw(port);
                break;
        case 32:
                *(u32*) value = inl(port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}

acpi_status
acpi_os_write_port(
        acpi_io_address port,
        u32             value,
        u32             width)
{
        switch (width)
        {
        case 8:
                outb(value, port);
                break;
        case 16:
                outw(value, port);
                break;
        case 32:
                outl(value, port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}
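
/*
 * Read/write physical memory for the interpreter (SystemMemory operation
 * regions).  On EFI systems, memory that isn't write-back cacheable gets a
 * temporary ioremap() just wide enough for the access; otherwise the
 * kernel's linear mapping is assumed to cover the address.
 */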
acpi_status
acpi_os_read_memory(
        acpi_physical_address   phys_addr,
        u32                     *value,
        u32                     width)
{
        u32                     dummy;
        void                    *virt_addr;

#ifdef CONFIG_ACPI_EFI
        int                     iomem = 0;

        if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                virt_addr = phys_to_virt(phys_addr);
        } else {
                iomem = 1;
                virt_addr = ioremap(phys_addr, width);
        }
#else
        virt_addr = phys_to_virt(phys_addr);
#endif
        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8*) value = *(u8*) virt_addr;
                break;
        case 16:
                *(u16*) value = *(u16*) virt_addr;
                break;
        case 32:
                *(u32*) value = *(u32*) virt_addr;
                break;
        default:
                BUG();
        }

#ifdef CONFIG_ACPI_EFI
        if (iomem)
                iounmap(virt_addr);
#endif

        return AE_OK;
}

acpi_status
acpi_os_write_memory(
        acpi_physical_address   phys_addr,
        u32                     value,
        u32                     width)
{
        void                    *virt_addr;

#ifdef CONFIG_ACPI_EFI
        int                     iomem = 0;

        if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                virt_addr = phys_to_virt(phys_addr);
        } else {
                iomem = 1;
                virt_addr = ioremap(phys_addr, width);
        }
#else
        virt_addr = phys_to_virt(phys_addr);
#endif

        switch (width) {
        case 8:
                *(u8*) virt_addr = value;
                break;
        case 16:
                *(u16*) virt_addr = value;
                break;
        case 32:
                *(u32*) virt_addr = value;
                break;
        default:
                BUG();
        }

#ifdef CONFIG_ACPI_EFI
        if (iomem)
                iounmap(virt_addr);
#endif

        return AE_OK;
}

#ifdef CONFIG_ACPI_PCI

acpi_status
acpi_os_read_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        void                    *value,
        u32                     width)
{
        int                     result = 0;
        if (!value)
                return AE_BAD_PARAMETER;

        switch (width)
        {
        case 8:
                result = pci_config_read(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 1, value);
                break;
        case 16:
                result = pci_config_read(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 2, value);
                break;
        case 32:
                result = pci_config_read(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 4, value);
                break;
        default:
                BUG();
        }

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        acpi_integer            value,
        u32                     width)
{
        int                     result = 0;

        switch (width)
        {
        case 8:
                result = pci_config_write(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 1, value);
                break;
        case 16:
                result = pci_config_write(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 2, value);
                break;
        case 32:
                result = pci_config_write(pci_id->segment, pci_id->bus,
                        pci_id->device, pci_id->function, reg, 4, value);
                break;
        default:
                BUG();
        }

        return (result ? AE_ERROR : AE_OK);
}
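
/*
 * Walk from rhandle down to chandle (via the recursion), evaluating _ADR
 * at each device node to recover the PCI device/function numbers, and
 * reading the config-space header type (0x0e), primary bus (0x18) and
 * secondary bus (0x19) registers to follow PCI-PCI and CardBus bridges,
 * so that *id ends up with the correct bus/device/function for chandle.
 */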
static void
acpi_os_derive_pci_id_2 (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id,
        int                     *is_bridge,
        u8                      *bus_number)
{
        acpi_handle             handle;
        struct acpi_pci_id      *pci_id = *id;
        acpi_status             status;
        unsigned long           temp;
        acpi_object_type        type;
        u8                      tu8;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, bus_number);

                status = acpi_get_type(handle, &type);
                if ( (ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE) )
                        return;

                status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &temp);
                if (ACPI_SUCCESS(status)) {
                        pci_id->device  = ACPI_HIWORD (ACPI_LODWORD (temp));
                        pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* any nicer way to get bus number of bridge ? */
                        status = acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 8);
                        if (ACPI_SUCCESS(status) &&
                            ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
                                status = acpi_os_read_pci_configuration(pci_id, 0x18, &tu8, 8);
                                if (!ACPI_SUCCESS(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = tu8;
                                status = acpi_os_read_pci_configuration(pci_id, 0x19, &tu8, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = tu8;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}

void
acpi_os_derive_pci_id (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}

#else /*!CONFIG_ACPI_PCI*/

acpi_status
acpi_os_write_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        acpi_integer            value,
        u32                     width)
{
        return (AE_SUPPORT);
}

acpi_status
acpi_os_read_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        void                    *value,
        u32                     width)
{
        return (AE_SUPPORT);
}

void
acpi_os_derive_pci_id (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id)
{
}

#endif /*CONFIG_ACPI_PCI*/
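
/*
 * Deferred execution: acpi_os_queue_for_execution() below packages the
 * callback in an acpi_os_dpc (with its tq_struct allocated right behind
 * it) and hands it to schedule_task(), so acpi_os_execute_deferred() runs
 * in keventd's process context and frees the DPC once the callback returns.
 */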
static void
acpi_os_execute_deferred (
        void *context)
{
        struct acpi_os_dpc      *dpc = NULL;

        ACPI_FUNCTION_TRACE ("os_execute_deferred");

        dpc = (struct acpi_os_dpc *) context;
        if (!dpc) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
                return_VOID;
        }

        dpc->function(dpc->context);

        kfree(dpc);

        return_VOID;
}

acpi_status
acpi_os_queue_for_execution(
        u32                     priority,
        OSD_EXECUTION_CALLBACK  function,
        void                    *context)
{
        acpi_status             status = AE_OK;
        struct acpi_os_dpc      *dpc = NULL;
        struct tq_struct        *task;

        ACPI_FUNCTION_TRACE ("os_queue_for_execution");

        ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context));

        if (!function)
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the tq_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static tq_struct.
         * We can save time and code by allocating the DPC and tq_structs
         * from the same memory.
         */
        dpc = kmalloc(sizeof(struct acpi_os_dpc)+sizeof(struct tq_struct), GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS (AE_NO_MEMORY);

        dpc->function = function;
        dpc->context = context;

        task = (void *)(dpc+1);
        INIT_TQUEUE(task, acpi_os_execute_deferred, (void*)dpc);

        if (!schedule_task(task)) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_task() failed.\n"));
                kfree(dpc);
                status = AE_ERROR;
        }

        return_ACPI_STATUS (status);
}

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status
acpi_os_create_lock (
        acpi_handle     *out_handle)
{
        spinlock_t *lock_ptr;

        ACPI_FUNCTION_TRACE ("os_create_lock");

        lock_ptr = acpi_os_allocate(sizeof(spinlock_t));

        spin_lock_init(lock_ptr);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));

        *out_handle = lock_ptr;

        return_ACPI_STATUS (AE_OK);
}


/*
 * Deallocate the memory for a spinlock.
 */
void
acpi_os_delete_lock (
        acpi_handle     handle)
{
        ACPI_FUNCTION_TRACE ("os_delete_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));

        acpi_os_free(handle);

        return_VOID;
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 * flags is *not* the result of save_flags - it is an ACPI-specific flag variable
 *   that indicates whether we are at interrupt level.
 */
void
acpi_os_acquire_lock (
        acpi_handle     handle,
        u32             flags)
{
        ACPI_FUNCTION_TRACE ("os_acquire_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle,
                ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

        if (flags & ACPI_NOT_ISR)
                ACPI_DISABLE_IRQS();

        spin_lock((spinlock_t *)handle);

        return_VOID;
}


/*
 * Release a spinlock. See above.
 */
void
acpi_os_release_lock (
        acpi_handle     handle,
        u32             flags)
{
        ACPI_FUNCTION_TRACE ("os_release_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle,
                ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

        spin_unlock((spinlock_t *)handle);

        if (flags & ACPI_NOT_ISR)
                ACPI_ENABLE_IRQS();

        return_VOID;
}


acpi_status
acpi_os_create_semaphore(
        u32             max_units,
        u32             initial_units,
        acpi_handle     *handle)
{
        struct semaphore        *sem = NULL;

        ACPI_FUNCTION_TRACE ("os_create_semaphore");

        sem = acpi_os_allocate(sizeof(struct semaphore));
        if (!sem)
                return_ACPI_STATUS (AE_NO_MEMORY);
        memset(sem, 0, sizeof(struct semaphore));

        sema_init(sem, initial_units);

        *handle = (acpi_handle*)sem;

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", *handle, initial_units));

        return_ACPI_STATUS (AE_OK);
}


/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status
acpi_os_delete_semaphore(
        acpi_handle     handle)
{
        struct semaphore *sem = (struct semaphore*) handle;

        ACPI_FUNCTION_TRACE ("os_delete_semaphore");

        if (!sem)
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        acpi_os_free(sem); sem = NULL;

        return_ACPI_STATUS (AE_OK);
}


/*
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
 * improvise.  The process is to sleep for one scheduler quantum
 * until the semaphore becomes available.  Downside is that this
 * may result in starvation for timeout-based waits when there's
 * lots of semaphore activity.
 *
 * TODO: Support for units > 1?
 */
acpi_status
acpi_os_wait_semaphore(
        acpi_handle             handle,
        u32                     units,
        u16                     timeout)
{
        acpi_status             status = AE_OK;
        struct semaphore        *sem = (struct semaphore*)handle;
        int                     ret = 0;

        ACPI_FUNCTION_TRACE ("os_wait_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS (AE_SUPPORT);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout));

        if (in_interrupt())
                timeout = 0;

        switch (timeout)
        {
                /*
                 * No Wait:
                 * --------
                 * A zero timeout value indicates that we shouldn't wait - just
                 * acquire the semaphore if available otherwise return AE_TIME
                 * (a.k.a. 'would block').
                 */
                case 0:
                if (down_trylock(sem))
                        status = AE_TIME;
                break;

                /*
                 * Wait Indefinitely:
                 * ------------------
                 */
                case ACPI_WAIT_FOREVER:
                down(sem);
                break;

                /*
                 * Wait w/ Timeout:
                 * ----------------
                 */
                default:
                // TODO: A better timeout algorithm?
                {
                        int i = 0;
                        static const int quantum_ms = 1000/HZ;

                        ret = down_trylock(sem);
                        for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
                                current->state = TASK_INTERRUPTIBLE;
                                schedule_timeout(1);
                                ret = down_trylock(sem);
                        }

                        if (ret != 0)
                                status = AE_TIME;
                }
                break;
        }

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Failed to acquire semaphore[%p|%d|%d], %s\n",
                        handle, units, timeout, acpi_format_exception(status)));
        }
        else {
                ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout));
        }

        return_ACPI_STATUS (status);
}


/*
 * TODO: Support for units > 1?
 */
acpi_status
acpi_os_signal_semaphore(
    acpi_handle             handle,
    u32                     units)
{
        struct semaphore *sem = (struct semaphore *) handle;

        ACPI_FUNCTION_TRACE ("os_signal_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS (AE_SUPPORT);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units));

        up(sem);

        return_ACPI_STATUS (AE_OK);
}

u32
acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}

/*
 * We just have to assume we're dealing with valid memory
 */

u8
acpi_os_readable(void *ptr, acpi_size len)
{
        return 1;
}

u8
acpi_os_writable(void *ptr, acpi_size len)
{
        return 1;
}

u32
acpi_os_get_thread_id (void)
{
        if (!in_interrupt())
                return current->pid;

        return 0;
}
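
/*
 * Hook for AML-generated signals: the Fatal and BreakPoint opcodes land
 * here, and we just log them rather than halting the machine or dropping
 * into a debugger.
 */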
acpi_status
acpi_os_signal (
    u32         function,
    void        *info)
{
        switch (function)
        {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                {
                        char *bp_info = (char*) info;

                        printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info);
                }
        default:
                break;
        }

        return AE_OK;
}
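
/*
 * "acpi_os_name=" boot parameter: copy up to ACPI_MAX_OVERRIDE_LEN-1
 * characters (alphanumerics, spaces and ':'; quotes are stripped) into
 * acpi_os_name for acpi_os_predefined_override() to report as _OS_.
 * For example, booting with acpi_os_name="Microsoft Windows NT" would
 * make _OS_ evaluate to that string.
 */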
int __init
acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN-1;

        if (!str || !*str)
                return 0;

        for (; count-- && str && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * _OSI control
 * empty string disables _OSI
 * TBD additional string adds to _OSI
 */
int __init
acpi_osi_setup(char *str)
{
        if (str == NULL || *str == '\0') {
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
                acpi_gbl_create_osi_method = FALSE;
        } else
        {
                /* TBD */
                printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n", str);
        }

        return 1;
}

__setup("acpi_osi=", acpi_osi_setup);

/* enable serialization to combat AE_ALREADY_EXISTS errors */
int __init
acpi_serialize_setup(char *str)
{
        printk(KERN_INFO PREFIX "serialize enabled\n");

        acpi_gbl_all_methods_serialized = TRUE;

        return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);

/*
 * Wake and Run-Time GPES are expected to be separate.
 * We disable wake-GPEs at run-time to prevent spurious
 * interrupts.
 *
 * However, if a system exists that shares Wake and
 * Run-time events on the same GPE this flag is available
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
 */
int __init
acpi_wake_gpes_always_on_setup(char *str)
{
        printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

        acpi_gbl_leave_wake_gpes_disabled = FALSE;

        return 1;
}

__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
