OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [drivers/] [acpi/] [osl.c] - Blame information for rev 62

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
/*
2
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3
 *
4
 *  Copyright (C) 2000       Andrew Henroid
5
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7
 *
8
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9
 *
10
 *  This program is free software; you can redistribute it and/or modify
11
 *  it under the terms of the GNU General Public License as published by
12
 *  the Free Software Foundation; either version 2 of the License, or
13
 *  (at your option) any later version.
14
 *
15
 *  This program is distributed in the hope that it will be useful,
16
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18
 *  GNU General Public License for more details.
19
 *
20
 *  You should have received a copy of the GNU General Public License
21
 *  along with this program; if not, write to the Free Software
22
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23
 *
24
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25
 *
26
 */
27
 
28
#include <linux/module.h>
29
#include <linux/kernel.h>
30
#include <linux/slab.h>
31
#include <linux/mm.h>
32
#include <linux/pci.h>
33
#include <linux/interrupt.h>
34
#include <linux/kmod.h>
35
#include <linux/delay.h>
36
#include <linux/dmi.h>
37
#include <linux/workqueue.h>
38
#include <linux/nmi.h>
39
#include <linux/acpi.h>
40
#include <acpi/acpi.h>
41
#include <asm/io.h>
42
#include <acpi/acpi_bus.h>
43
#include <acpi/processor.h>
44
#include <asm/uaccess.h>
45
 
46
#include <linux/efi.h>
47
 
48
#define _COMPONENT              ACPI_OS_SERVICES
49
ACPI_MODULE_NAME("osl");
50
#define PREFIX          "ACPI: "
51
/*
 * Deferred-procedure-call descriptor: carries an ACPICA callback and its
 * argument through a kernel workqueue.  Allocated in acpi_os_execute()
 * and freed by the work handler that runs it.
 */
struct acpi_os_dpc {
        acpi_osd_exec_callback function;        /* ACPICA callback to invoke */
        void *context;                          /* opaque argument for ->function */
        struct work_struct work;                /* embedded workqueue item */
};
56
 
57
#ifdef CONFIG_ACPI_CUSTOM_DSDT
58
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
59
#endif
60
 
61
#ifdef ENABLE_DEBUGGER
62
#include <linux/kdb.h>
63
 
64
/* stuff for debugger support */
65
int acpi_in_debugger;
66
EXPORT_SYMBOL(acpi_in_debugger);
67
 
68
extern char line_buf[80];
69
#endif                          /*ENABLE_DEBUGGER */
70
 
71
static unsigned int acpi_irq_irq;
72
static acpi_osd_handler acpi_irq_handler;
73
static void *acpi_irq_context;
74
static struct workqueue_struct *kacpid_wq;
75
static struct workqueue_struct *kacpi_notify_wq;
76
 
77
#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
78
static char osi_additional_string[OSI_STRING_LENGTH_MAX];
79
 
80
/*
81
 * "Ode to _OSI(Linux)"
82
 *
83
 * osi_linux -- Control response to BIOS _OSI(Linux) query.
84
 *
85
 * As Linux evolves, the features that it supports change.
86
 * So an OSI string such as "Linux" is not specific enough
87
 * to be useful across multiple versions of Linux.  It
88
 * doesn't identify any particular feature, interface,
89
 * or even any particular version of Linux...
90
 *
91
 * Unfortunately, Linux-2.6.22 and earlier responded "yes"
92
 * to a BIOS _OSI(Linux) query.  When
93
 * a reference mobile BIOS started using it, its use
94
 * started to spread to many vendor platforms.
95
 * As it is not supportable, we need to halt that spread.
96
 *
97
 * Today, most BIOS references to _OSI(Linux) are noise --
98
 * they have no functional effect and are just dead code
99
 * carried over from the reference BIOS.
100
 *
101
 * The next most common case is that _OSI(Linux) harms Linux,
102
 * usually by causing the BIOS to follow paths that are
103
 * not tested during Windows validation.
104
 *
105
 * Finally, there is a short list of platforms
106
 * where OSI(Linux) benefits Linux.
107
 *
108
 * In Linux-2.6.23, OSI(Linux) is first disabled by default.
109
 * DMI is used to disable the dmesg warning about OSI(Linux)
110
 * on platforms where it is known to have no effect.
111
 * But a dmesg warning remains for systems where
112
 * we do not know if OSI(Linux) is good or bad for the system.
113
 * DMI is also used to enable OSI(Linux) for the machines
114
 * that are known to need it.
115
 *
116
 * BIOS writers should NOT query _OSI(Linux) on future systems.
117
 * It will be ignored by default, and to get Linux to
118
 * not ignore it will require a kernel source update to
119
 * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
120
 */
121
#define OSI_LINUX_ENABLE 0
122
 
123
struct osi_linux {
124
        unsigned int    enable:1;
125
        unsigned int    dmi:1;
126
        unsigned int    cmdline:1;
127
        unsigned int    known:1;
128
} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
129
 
130
/*
 * Reserve the I/O-port or MMIO range described by a FADT generic address
 * so it appears in /proc/ioports or /proc/iomem under @desc.
 *
 * Fix: the old code stored the request_region()/request_mem_region()
 * result in a local `res` that was never read (set-but-unused warning).
 * A failed reservation is deliberately not fatal here, so the result is
 * now explicitly discarded.
 */
static void __init acpi_request_region (struct acpi_generic_address *addr,
        unsigned int length, char *desc)
{
        /* Nothing to reserve for an absent or zero-length block. */
        if (!addr->address || !length)
                return;

        /* Dispatch on the FADT address-space type: port I/O vs. MMIO. */
        if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                request_region(addr->address, length, desc);
        else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                request_mem_region(addr->address, length, desc);
}
143
 
144
/*
 * Reserve the fixed ACPI hardware register blocks described by the FADT
 * (PM1a/b event and control, PM timer, PM2 control, GPE0/1) so that other
 * drivers cannot claim them.  Runs once at device_initcall time.
 */
static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        /* A 32-bit PM timer block is exactly 4 bytes long. */
        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
device_initcall(acpi_reserve_resources);
177
 
178
/* Early ACPICA OSL init hook; nothing to do at this stage. */
acpi_status __init acpi_os_initialize(void)
{
        return AE_OK;
}
182
 
183
/*
 * Second-stage OSL init, called once ACPICA is up: verify PCI config-space
 * access is available and create the two single-threaded ACPI workqueues.
 *
 * NOTE(review): BUG_ON() makes workqueue-allocation failure fatal at boot;
 * confirm whether a graceful error return would be acceptable to callers.
 */
acpi_status acpi_os_initialize1(void)
{
        /*
         * Initialize PCI configuration space access, as we'll need to access
         * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
         */
        if (!raw_pci_ops) {
                printk(KERN_ERR PREFIX
                       "Access to PCI configuration space unavailable\n");
                return AE_NULL_ENTRY;
        }
        kacpid_wq = create_singlethread_workqueue("kacpid");
        kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        return AE_OK;
}
200
 
201
/*
 * OSL teardown: release the SCI interrupt handler (if one was installed)
 * and destroy the workqueues created in acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);

        return AE_OK;
}
213
 
214
/* printf-style front end for ACPICA messages; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

EXPORT_SYMBOL(acpi_os_printf);
223
 
224
/*
 * vprintf-style output for ACPICA: formats into a static buffer and sends
 * it to either the kernel debugger (when active) or printk.
 *
 * Fix: use vsnprintf() so a long AML-supplied format expansion cannot
 * overflow the fixed 512-byte buffer (the old vsprintf() was unbounded).
 *
 * NOTE(review): the static buffer is shared by all callers and is not
 * serialized here -- presumably the ACPICA interpreter lock keeps this
 * effectively single-threaded; confirm before relying on it.
 */
void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk("%s", buffer);
        }
#else
        printk("%s", buffer);
#endif
}
240
 
241
/*
 * Return the physical address of the RSDP.  On EFI systems the firmware
 * publishes it in the EFI system table (preferring the ACPI 2.0 entry);
 * otherwise fall back to the legacy memory scan in acpi_find_rsdp().
 * Returns 0 when no table pointer can be found on an EFI system.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        if (efi_enabled) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else
                return acpi_find_rsdp();
}
256
 
257
/*
 * Map @size bytes of ACPI physical memory into kernel virtual space.
 * Returns NULL when the address cannot be represented in a pointer.
 */
void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        /* Guard: the address must fit in an unsigned long for ioremap(). */
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }

        /*
         * Once the permanent-mmap phase is reached, ioremap() validates
         * that the range lies in reserved space; before that, use the
         * boot-time fixed-mapping helper.
         */
        if (acpi_gbl_permanent_mmap)
                return ioremap((unsigned long)phys, size);

        return __acpi_map_table((unsigned long)phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
272
 
273
/*
 * Undo acpi_os_map_memory().  Boot-time __acpi_map_table() mappings need
 * no teardown; only permanent ioremap()ed ranges are released here.
 */
void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
        if (!acpi_gbl_permanent_mmap)
                return;

        iounmap(virt);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
280
 
281
#ifdef ACPI_FUTURE_USAGE
282
acpi_status
283
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
284
{
285
        if (!phys || !virt)
286
                return AE_BAD_PARAMETER;
287
 
288
        *phys = virt_to_phys(virt);
289
 
290
        return AE_OK;
291
}
292
#endif
293
 
294
#define ACPI_MAX_OVERRIDE_LEN 100
295
 
296
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
297
 
298
acpi_status
299
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
300
                            acpi_string * new_val)
301
{
302
        if (!init_val || !new_val)
303
                return AE_BAD_PARAMETER;
304
 
305
        *new_val = NULL;
306
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
307
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
308
                       acpi_os_name);
309
                *new_val = acpi_os_name;
310
        }
311
 
312
        return AE_OK;
313
}
314
 
315
acpi_status
316
acpi_os_table_override(struct acpi_table_header * existing_table,
317
                       struct acpi_table_header ** new_table)
318
{
319
        if (!existing_table || !new_table)
320
                return AE_BAD_PARAMETER;
321
 
322
#ifdef CONFIG_ACPI_CUSTOM_DSDT
323
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
324
                *new_table = (struct acpi_table_header *)AmlCode;
325
        else
326
                *new_table = NULL;
327
#else
328
        *new_table = NULL;
329
#endif
330
        return AE_OK;
331
}
332
 
333
/*
 * Low-level SCI interrupt thunk: forwards to the handler ACPICA installed
 * via acpi_os_install_interrupt_handler(), translating its result into
 * the kernel's IRQ_HANDLED/IRQ_NONE convention (for IRQF_SHARED lines).
 */
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
}
337
 
338
acpi_status
339
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
340
                                  void *context)
341
{
342
        unsigned int irq;
343
 
344
        /*
345
         * Ignore the GSI from the core, and use the value in our copy of the
346
         * FADT. It may not be the same if an interrupt source override exists
347
         * for the SCI.
348
         */
349
        gsi = acpi_gbl_FADT.sci_interrupt;
350
        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
351
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
352
                       gsi);
353
                return AE_OK;
354
        }
355
 
356
        acpi_irq_handler = handler;
357
        acpi_irq_context = context;
358
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
359
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
360
                return AE_NOT_ACQUIRED;
361
        }
362
        acpi_irq_irq = irq;
363
 
364
        return AE_OK;
365
}
366
 
367
/*
 * Detach the SCI handler.  An @irq of zero means no handler was ever
 * successfully installed, so there is nothing to free.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
        if (irq) {
                /* dev_id must match the cookie passed to request_irq(). */
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
                acpi_irq_irq = 0;
        }

        return AE_OK;
}
377
 
378
/*
379
 * Running in interpreter thread context, safe to sleep
380
 */
381
 
382
/*
 * Sleep for @ms milliseconds.  Runs in interpreter thread context (see
 * note above) so sleeping is safe; may wake early on a signal since the
 * wait is interruptible.
 */
void acpi_os_sleep(acpi_integer ms)
{
        schedule_timeout_interruptible(msecs_to_jiffies(ms));
}

EXPORT_SYMBOL(acpi_os_sleep);
388
 
389
/*
 * Busy-wait for @us microseconds without sleeping.  The delay is chopped
 * into chunks of at most 1 ms so the NMI watchdog can be petted between
 * chunks during long stalls.
 */
void acpi_os_stall(u32 us)
{
        while (us) {
                u32 chunk = (us > 1000) ? 1000 : us;

                udelay(chunk);
                touch_nmi_watchdog();
                us -= chunk;
        }
}

EXPORT_SYMBOL(acpi_os_stall);
403
 
404
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 *
 * NOTE(review): this is currently a stub -- it returns a simple
 * incrementing counter and warns on first use; it does NOT provide real
 * 100ns granularity yet (see the HPET/PM-timer TBDs below).
 */
u64 acpi_os_get_timer(void)
{
        static u64 t;

#ifdef  CONFIG_HPET
        /* TBD: use HPET if available */
#endif

#ifdef  CONFIG_X86_PM_TIMER
        /* TBD: default to PM timer if HPET was not available */
#endif
        /* Warn only the first time (t is still zero). */
        if (!t)
                printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

        return ++t;
}
425
 
426
/*
 * Read up to 32 bits from an I/O port.  @width is in bits and rounds up
 * to the next supported access size; widths above 32 are a BUG().
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        /* Callers may pass NULL when only the read side effect matters. */
        if (!value)
                value = &dummy;

        /* Clear first so narrow reads leave the upper bytes zeroed. */
        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
448
 
449
/*
 * Write up to 32 bits to an I/O port.  @width is in bits and rounds up
 * to the next supported access size; widths above 32 are a BUG().
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
465
 
466
acpi_status
467
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
468
{
469
        u32 dummy;
470
        void __iomem *virt_addr;
471
 
472
        virt_addr = ioremap(phys_addr, width);
473
        if (!value)
474
                value = &dummy;
475
 
476
        switch (width) {
477
        case 8:
478
                *(u8 *) value = readb(virt_addr);
479
                break;
480
        case 16:
481
                *(u16 *) value = readw(virt_addr);
482
                break;
483
        case 32:
484
                *(u32 *) value = readl(virt_addr);
485
                break;
486
        default:
487
                BUG();
488
        }
489
 
490
        iounmap(virt_addr);
491
 
492
        return AE_OK;
493
}
494
 
495
acpi_status
496
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
497
{
498
        void __iomem *virt_addr;
499
 
500
        virt_addr = ioremap(phys_addr, width);
501
 
502
        switch (width) {
503
        case 8:
504
                writeb(value, virt_addr);
505
                break;
506
        case 16:
507
                writew(value, virt_addr);
508
                break;
509
        case 32:
510
                writel(value, virt_addr);
511
                break;
512
        default:
513
                BUG();
514
        }
515
 
516
        iounmap(virt_addr);
517
 
518
        return AE_OK;
519
}
520
 
521
/*
 * Read @width bits (8/16/32) of PCI config space for the device named by
 * @pci_id at offset @reg, via the raw (pre-driver-model) PCI accessors.
 * Any other width returns AE_ERROR.
 */
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               void *value, u32 width)
{
        int result, size;

        if (!value)
                return AE_BAD_PARAMETER;

        /* Translate bit width into the byte count raw_pci_ops expects. */
        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        /* acpi_os_initialize1() guarantees raw_pci_ops is set. */
        BUG_ON(!raw_pci_ops);

        result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
                                   PCI_DEVFN(pci_id->device, pci_id->function),
                                   reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

EXPORT_SYMBOL(acpi_os_read_pci_configuration);
554
 
555
acpi_status
556
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
557
                                acpi_integer value, u32 width)
558
{
559
        int result, size;
560
 
561
        switch (width) {
562
        case 8:
563
                size = 1;
564
                break;
565
        case 16:
566
                size = 2;
567
                break;
568
        case 32:
569
                size = 4;
570
                break;
571
        default:
572
                return AE_ERROR;
573
        }
574
 
575
        BUG_ON(!raw_pci_ops);
576
 
577
        result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
578
                                    PCI_DEVFN(pci_id->device, pci_id->function),
579
                                    reg, size, value);
580
 
581
        return (result ? AE_ERROR : AE_OK);
582
}
583
 
584
/* TODO: Change code to take advantage of driver model more */
585
/*
 * Recursive helper for acpi_os_derive_pci_id(): walk up the namespace from
 * @chandle toward @rhandle, evaluating _ADR at each device node to refine
 * the PCI device/function, and reading bridge config space to track the
 * bus number across PCI-PCI bridges.
 */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
                                    acpi_handle chandle,        /* current node */
                                    struct acpi_pci_id **id,
                                    int *is_bridge, u8 * bus_number)
{
        acpi_handle handle;
        struct acpi_pci_id *pci_id = *id;
        acpi_status status;
        unsigned long temp;
        acpi_object_type type;
        u8 tu8;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                /* Recurse root-first so ancestors are processed before us. */
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
                                        bus_number);

                /* Only device nodes carry a usable _ADR. */
                status = acpi_get_type(handle, &type);
                if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
                        return;

                status =
                    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
                                          &temp);
                if (ACPI_SUCCESS(status)) {
                        /* _ADR: device in the high word, function in the low. */
                        pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
                        pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* any nicer way to get bus number of bridge ? */
                        /* Config offset 0x0e = header type; 1 or 2 => bridge. */
                        status =
                            acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
                                                           8);
                        if (ACPI_SUCCESS(status)
                            && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
                                /* Offset 0x18 = primary bus number. */
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x18,
                                                                   &tu8, 8);
                                if (!ACPI_SUCCESS(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = tu8;
                                /* Offset 0x19 = secondary bus number. */
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x19,
                                                                   &tu8, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = tu8;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}
642
 
643
/*
 * Derive (*id)->device/function (and bus across bridges) for the ACPI
 * device @chandle by walking up to the root-bridge node @rhandle.
 */
void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
                           acpi_handle chandle, /* current node */
                           struct acpi_pci_id **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
652
 
653
static void acpi_os_execute_deferred(struct work_struct *work)
654
{
655
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
656
        if (!dpc) {
657
                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
658
                return;
659
        }
660
 
661
        dpc->function(dpc->context);
662
        kfree(dpc);
663
 
664
        /* Yield cpu to notify thread */
665
        cond_resched();
666
 
667
        return;
668
}
669
 
670
static void acpi_os_execute_notify(struct work_struct *work)
671
{
672
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
673
 
674
        if (!dpc) {
675
                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
676
                return;
677
        }
678
 
679
        dpc->function(dpc->context);
680
 
681
        kfree(dpc);
682
 
683
        return;
684
}
685
 
686
/*******************************************************************************
687
 *
688
 * FUNCTION:    acpi_os_execute
689
 *
690
 * PARAMETERS:  Type               - Type of the callback
691
 *              Function           - Function to be executed
692
 *              Context            - Function parameters
693
 *
694
 * RETURN:      Status
695
 *
696
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
697
 *              immediately executes function on a separate thread.
698
 *
699
 ******************************************************************************/
700
 
701
acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (!function)
                return AE_BAD_PARAMETER;

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list  in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        /* GFP_ATOMIC: this may be invoked from non-sleepable context. */
        dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS(AE_NO_MEMORY);

        dpc->function = function;
        dpc->context = context;

        /*
         * Notify callbacks run on their own queue so they are not starved
         * by (or deadlocked against) work pending on the main kacpid queue.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                INIT_WORK(&dpc->work, acpi_os_execute_notify);
                if (!queue_work(kacpi_notify_wq, &dpc->work)) {
                        /* Queueing failed: nobody will free dpc, do it here. */
                        status = AE_ERROR;
                        kfree(dpc);
                }
        } else {
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
                if (!queue_work(kacpid_wq, &dpc->work)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Call to queue_work() failed.\n"));
                        status = AE_ERROR;
                        kfree(dpc);
                }
        }
        return_ACPI_STATUS(status);
}

EXPORT_SYMBOL(acpi_os_execute);
749
 
750
/*
 * Block until all work queued via acpi_os_execute() on the main queue has
 * finished.  NOTE(review): only kacpid_wq is flushed; kacpi_notify_wq is
 * not -- confirm whether notify work must also be drained by callers.
 */
void acpi_os_wait_events_complete(void *context)
{
        flush_workqueue(kacpid_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);
756
 
757
/*
 * Initialize the spinlock behind @handle.  Despite the historical wording
 * ("allocate the memory"), no memory is allocated here: the caller supplies
 * the lock storage and this only runs spin_lock_init() on it.
 * NOTE(review): assumes *handle already points at valid lock storage --
 * confirm against the ACPICA caller.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
        spin_lock_init(*handle);

        return AE_OK;
}
766
 
767
/*
 * Counterpart to acpi_os_create_lock().  Since no memory was allocated
 * there, there is nothing to free or tear down: deliberately a no-op.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        return;
}
774
 
775
/*
 * Allocate and initialize a counting semaphore with @initial_units.
 * @max_units is accepted for the ACPICA interface but not enforced here.
 * The caller owns the returned handle and must release it with
 * acpi_os_delete_semaphore().
 */
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;


        sem = acpi_os_allocate(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;
        /* Zero first; sema_init() then sets the live fields. */
        memset(sem, 0, sizeof(struct semaphore));

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_create_semaphore);
797
 
798
/*
799
 * TODO: A better way to delete semaphores?  Linux doesn't have a
800
 * 'delete_semaphore()' function -- may result in an invalid
801
 * pointer dereference for non-synchronized consumers.  Should
802
 * we at least check for blocked threads and signal/cancel them?
803
 */
804
 
805
/*
 * Free a semaphore created by acpi_os_create_semaphore().
 *
 * Fix: dropped the dead store `sem = NULL` after kfree() -- `sem` is a
 * local about to go out of scope, so the assignment had no effect.
 *
 * NOTE(review): nothing prevents another thread from still blocking on
 * this semaphore when it is freed -- see the TODO above this function.
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;


        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        kfree(sem);

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_delete_semaphore);
822
 
823
/*
824
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
825
 * improvise.  The process is to sleep for one scheduler quantum
826
 * until the semaphore becomes available.  Downside is that this
827
 * may result in starvation for timeout-based waits when there's
828
 * lots of semaphore activity.
829
 *
830
 * TODO: Support for units > 1?
831
 */
832
/*
 * Acquire @units (only 1 supported) of semaphore @handle, waiting at most
 * @timeout ms (0 = try once, ACPI_WAIT_FOREVER = block indefinitely).
 * Returns AE_TIME when the semaphore could not be acquired in time.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        int ret = 0;


        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        /*
         * This can be called during resume with interrupts off.
         * Like boot-time, we should be single threaded and will
         * always get the lock if we try -- timeout or not.
         * If this doesn't succeed, then we will oops courtesy of
         * might_sleep() in down().
         */
        if (!down_trylock(sem))
                return AE_OK;

        switch (timeout) {
                /*
                 * No Wait:
                 * --------
                 * A zero timeout value indicates that we shouldn't wait - just
                 * acquire the semaphore if available otherwise return AE_TIME
                 * (a.k.a. 'would block').
                 */
        case 0:
                if (down_trylock(sem))
                        status = AE_TIME;
                break;

                /*
                 * Wait Indefinitely:
                 * ------------------
                 */
        case ACPI_WAIT_FOREVER:
                down(sem);
                break;

                /*
                 * Wait w/ Timeout:
                 * ----------------
                 * Poll with down_trylock() once per jiffy until the timeout
                 * budget (counted in scheduler quanta) is exhausted.
                 */
        default:
                // TODO: A better timeout algorithm?
                {
                        int i = 0;
                        static const int quantum_ms = 1000 / HZ;

                        ret = down_trylock(sem);
                        for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
                                schedule_timeout_interruptible(1);
                                ret = down_trylock(sem);
                        }

                        if (ret != 0)
                                status = AE_TIME;
                }
                break;
        }

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

EXPORT_SYMBOL(acpi_os_wait_semaphore);
916
 
917
/*
918
 * TODO: Support for units > 1?
919
 */
920
/*
 * Release one unit of semaphore @handle.  units > 1 is unsupported,
 * mirroring the restriction in acpi_os_wait_semaphore().
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;


        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_signal_semaphore);
940
 
941
#ifdef ACPI_FUTURE_USAGE
942
u32 acpi_os_get_line(char *buffer)
943
{
944
 
945
#ifdef ENABLE_DEBUGGER
946
        if (acpi_in_debugger) {
947
                u32 chars;
948
 
949
                kdb_read(buffer, sizeof(line_buf));
950
 
951
                /* remove the CR kdb includes */
952
                chars = strlen(buffer) - 1;
953
                buffer[chars] = '\0';
954
        }
955
#endif
956
 
957
        return 0;
958
}
959
#endif                          /*  ACPI_FUTURE_USAGE  */
960
 
961
/*
 * Handle a signal raised by the AML interpreter.  Fatal opcodes are
 * logged; everything else is deliberately a no-op.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	if (function == ACPI_SIGNAL_FATAL) {
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
	} else if (function == ACPI_SIGNAL_BREAKPOINT) {
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_signal);
985
 
986
static int __init acpi_os_name_setup(char *str)
987
{
988
        char *p = acpi_os_name;
989
        int count = ACPI_MAX_OVERRIDE_LEN - 1;
990
 
991
        if (!str || !*str)
992
                return 0;
993
 
994
        for (; count-- && str && *str; str++) {
995
                if (isalnum(*str) || *str == ' ' || *str == ':')
996
                        *p++ = *str;
997
                else if (*str == '\'' || *str == '"')
998
                        continue;
999
                else
1000
                        break;
1001
        }
1002
        *p = 0;
1003
 
1004
        return 1;
1005
 
1006
}
1007
 
1008
__setup("acpi_os_name=", acpi_os_name_setup);
1009
 
1010
/* Flip the _OSI(Linux) answer and log the transition (no-op if unchanged). */
static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable == enable)
		return;

	osi_linux.enable = enable;
	printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
	       enable ? "Add" : "Delet");
}
1019
 
1020
/* Apply an _OSI(Linux) choice that came from the kernel command line. */
static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	/* Remember that the cmdline, not DMI, set the default. */
	osi_linux.cmdline = 1;
	set_osi_linux(enable);
}
1027
 
1028
/*
 * Apply an _OSI(Linux) choice coming from a DMI blacklist entry.
 * enable == -1 means the entry only marks the box as known to ask
 * for OSI(Linux) without choosing a default.
 */
void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */

	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.known = 1;	/* DMI knows which OSI(Linux) default needed */
	set_osi_linux(enable);
}
1043
 
1044
/*
1045
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1046
 *
1047
 * empty string disables _OSI
1048
 * string starting with '!' disables that string
1049
 * otherwise string is added to list, augmenting built-in strings
1050
 */
1051
static int __init acpi_osi_setup(char *str)
1052
{
1053
        if (str == NULL || *str == '\0') {
1054
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
1055
                acpi_gbl_create_osi_method = FALSE;
1056
        } else if (!strcmp("!Linux", str)) {
1057
                acpi_cmdline_osi_linux(0);       /* !enable */
1058
        } else if (*str == '!') {
1059
                if (acpi_osi_invalidate(++str) == AE_OK)
1060
                        printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1061
        } else if (!strcmp("Linux", str)) {
1062
                acpi_cmdline_osi_linux(1);      /* enable */
1063
        } else if (*osi_additional_string == '\0') {
1064
                strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1065
                printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1066
        }
1067
 
1068
        return 1;
1069
}
1070
 
1071
__setup("acpi_osi=", acpi_osi_setup);
1072
 
1073
/* enable serialization to combat AE_ALREADY_EXISTS errors */
1074
static int __init acpi_serialize_setup(char *str)
1075
{
1076
        printk(KERN_INFO PREFIX "serialize enabled\n");
1077
 
1078
        acpi_gbl_all_methods_serialized = TRUE;
1079
 
1080
        return 1;
1081
}
1082
 
1083
__setup("acpi_serialize", acpi_serialize_setup);
1084
 
1085
/*
1086
 * Wake and Run-Time GPES are expected to be separate.
1087
 * We disable wake-GPEs at run-time to prevent spurious
1088
 * interrupts.
1089
 *
1090
 * However, if a system exists that shares Wake and
1091
 * Run-time events on the same GPE this flag is available
1092
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
1093
 */
1094
static int __init acpi_wake_gpes_always_on_setup(char *str)
1095
{
1096
        printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1097
 
1098
        acpi_gbl_leave_wake_gpes_disabled = FALSE;
1099
 
1100
        return 1;
1101
}
1102
 
1103
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1104
 
1105
/*
1106
 * Acquire a spinlock.
1107
 *
1108
 * handle is a pointer to the spinlock_t.
1109
 */
1110
 
1111
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1112
{
1113
        acpi_cpu_flags flags;
1114
        spin_lock_irqsave(lockp, flags);
1115
        return flags;
1116
}
1117
 
1118
/*
1119
 * Release a spinlock. See above.
1120
 */
1121
 
1122
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1123
{
1124
        spin_unlock_irqrestore(lockp, flags);
1125
}
1126
 
1127
#ifndef ACPI_USE_LOCAL_CACHE
1128
 
1129
/*******************************************************************************
1130
 *
1131
 * FUNCTION:    acpi_os_create_cache
1132
 *
1133
 * PARAMETERS:  name      - Ascii name for the cache
1134
 *              size      - Size of each cached object
1135
 *              depth     - Maximum depth of the cache (in objects) <ignored>
1136
 *              cache     - Where the new cache object is returned
1137
 *
1138
 * RETURN:      status
1139
 *
1140
 * DESCRIPTION: Create a cache object
1141
 *
1142
 ******************************************************************************/
1143
 
1144
acpi_status
1145
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1146
{
1147
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
1148
        if (*cache == NULL)
1149
                return AE_ERROR;
1150
        else
1151
                return AE_OK;
1152
}
1153
 
1154
/*******************************************************************************
1155
 *
1156
 * FUNCTION:    acpi_os_purge_cache
1157
 *
1158
 * PARAMETERS:  Cache           - Handle to cache object
1159
 *
1160
 * RETURN:      Status
1161
 *
1162
 * DESCRIPTION: Free all objects within the requested cache.
1163
 *
1164
 ******************************************************************************/
1165
 
1166
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	/* Freeing all cached objects maps onto shrinking the slab cache. */
	kmem_cache_shrink(cache);
	return AE_OK;
}
1171
 
1172
/*******************************************************************************
1173
 *
1174
 * FUNCTION:    acpi_os_delete_cache
1175
 *
1176
 * PARAMETERS:  Cache           - Handle to cache object
1177
 *
1178
 * RETURN:      Status
1179
 *
1180
 * DESCRIPTION: Free all objects within the requested cache and delete the
1181
 *              cache object.
1182
 *
1183
 ******************************************************************************/
1184
 
1185
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	/* Destroying the slab cache frees its objects and the cache itself. */
	kmem_cache_destroy(cache);
	return AE_OK;
}
1190
 
1191
/*******************************************************************************
1192
 *
1193
 * FUNCTION:    acpi_os_release_object
1194
 *
1195
 * PARAMETERS:  Cache       - Handle to cache object
1196
 *              Object      - The object to be released
1197
 *
1198
 * RETURN:      None
1199
 *
1200
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1201
 *              the object is deleted.
1202
 *
1203
 ******************************************************************************/
1204
 
1205
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	/* The slab allocator decides whether to keep or free the object. */
	kmem_cache_free(cache, object);
	return AE_OK;
}
1210
 
1211
/**
1212
 *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
1213
 *
1214
 *      Returns 0 on success
1215
 */
1216
int acpi_dmi_dump(void)
1217
{
1218
 
1219
        if (!dmi_available)
1220
                return -1;
1221
 
1222
        printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1223
                dmi_get_slot(DMI_SYS_VENDOR));
1224
        printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1225
                dmi_get_slot(DMI_PRODUCT_NAME));
1226
        printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1227
                dmi_get_slot(DMI_PRODUCT_VERSION));
1228
        printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1229
                dmi_get_slot(DMI_BOARD_NAME));
1230
        printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1231
                dmi_get_slot(DMI_BIOS_VENDOR));
1232
        printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1233
                dmi_get_slot(DMI_BIOS_DATE));
1234
 
1235
        return 0;
1236
}
1237
 
1238
 
1239
/******************************************************************************
1240
 *
1241
 * FUNCTION:    acpi_os_validate_interface
1242
 *
1243
 * PARAMETERS:  interface           - Requested interface to be validated
1244
 *
1245
 * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1246
 *
1247
 * DESCRIPTION: Match an interface string to the interfaces supported by the
1248
 *              host. Strings originate from an AML call to the _OSI method.
1249
 *
1250
 *****************************************************************************/
1251
 
1252
acpi_status
1253
acpi_os_validate_interface (char *interface)
1254
{
1255
        if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1256
                return AE_OK;
1257
        if (!strcmp("Linux", interface)) {
1258
 
1259
                printk(KERN_NOTICE PREFIX
1260
                        "BIOS _OSI(Linux) query %s%s\n",
1261
                        osi_linux.enable ? "honored" : "ignored",
1262
                        osi_linux.cmdline ? " via cmdline" :
1263
                        osi_linux.dmi ? " via DMI" : "");
1264
 
1265
                if (!osi_linux.dmi) {
1266
                        if (acpi_dmi_dump())
1267
                                printk(KERN_NOTICE PREFIX
1268
                                        "[please extract dmidecode output]\n");
1269
                        printk(KERN_NOTICE PREFIX
1270
                                "Please send DMI info above to "
1271
                                "linux-acpi@vger.kernel.org\n");
1272
                }
1273
                if (!osi_linux.known && !osi_linux.cmdline) {
1274
                        printk(KERN_NOTICE PREFIX
1275
                                "If \"acpi_osi=%sLinux\" works better, "
1276
                                "please notify linux-acpi@vger.kernel.org\n",
1277
                                osi_linux.enable ? "!" : "");
1278
                }
1279
 
1280
                if (osi_linux.enable)
1281
                        return AE_OK;
1282
        }
1283
        return AE_SUPPORT;
1284
}
1285
 
1286
/******************************************************************************
1287
 *
1288
 * FUNCTION:    acpi_os_validate_address
1289
 *
1290
 * PARAMETERS:  space_id             - ACPI space ID
1291
 *              address             - Physical address
1292
 *              length              - Address length
1293
 *
1294
 * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1295
 *              should return AE_AML_ILLEGAL_ADDRESS.
1296
 *
1297
 * DESCRIPTION: Validate a system address via the host OS. Used to validate
1298
 *              the addresses accessed by AML operation regions.
1299
 *
1300
 *****************************************************************************/
1301
 
1302
acpi_status
1303
acpi_os_validate_address (
1304
    u8                   space_id,
1305
    acpi_physical_address   address,
1306
    acpi_size               length)
1307
{
1308
 
1309
    return AE_OK;
1310
}
1311
 
1312
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.